2024-12-10 03:33:26,121 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-10 03:33:26,135 main DEBUG Took 0.012166 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-10 03:33:26,136 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-10 03:33:26,136 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-10 03:33:26,137 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-10 03:33:26,138 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,145 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-10 03:33:26,159 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,161 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,161 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,162 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,163 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,163 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,164 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,165 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,166 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,166 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,167 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,168 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,168 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,169 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-10 03:33:26,169 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,170 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,170 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,171 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,171 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,172 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,172 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,173 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,173 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,174 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 03:33:26,174 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,175 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-10 03:33:26,177 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 03:33:26,179 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-10 03:33:26,181 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-10 03:33:26,182 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-10 03:33:26,184 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-10 03:33:26,185 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-10 03:33:26,195 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-10 03:33:26,198 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-10 03:33:26,200 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-10 03:33:26,200 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-10 03:33:26,201 main DEBUG createAppenders(={Console}) 2024-12-10 03:33:26,202 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-10 03:33:26,202 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-10 03:33:26,202 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-10 03:33:26,203 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-10 03:33:26,203 main DEBUG OutputStream closed 2024-12-10 03:33:26,203 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-10 03:33:26,203 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-10 03:33:26,204 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-10 03:33:26,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-10 03:33:26,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-10 03:33:26,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-10 03:33:26,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-10 03:33:26,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-10 03:33:26,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-10 03:33:26,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-10 03:33:26,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-10 03:33:26,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-10 03:33:26,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-10 03:33:26,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-10 03:33:26,273 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-10 03:33:26,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-10 03:33:26,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-10 03:33:26,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-10 03:33:26,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-10 03:33:26,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-10 03:33:26,275 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-10 03:33:26,277 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-10 03:33:26,277 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-10 03:33:26,278 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-10 03:33:26,278 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-10T03:33:26,475 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7 2024-12-10 03:33:26,477 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-10 03:33:26,478 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-10T03:33:26,486 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-10T03:33:26,503 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-10T03:33:26,506 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9, deleteOnExit=true 2024-12-10T03:33:26,506 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-10T03:33:26,507 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/test.cache.data in system properties and HBase conf 2024-12-10T03:33:26,507 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T03:33:26,508 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/hadoop.log.dir in system properties and HBase conf 2024-12-10T03:33:26,508 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T03:33:26,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T03:33:26,509 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-10T03:33:26,585 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-10T03:33:26,667 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T03:33:26,671 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T03:33:26,671 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T03:33:26,671 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T03:33:26,672 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T03:33:26,672 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T03:33:26,672 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T03:33:26,673 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T03:33:26,673 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T03:33:26,673 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T03:33:26,674 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/nfs.dump.dir in system properties and HBase conf 2024-12-10T03:33:26,674 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/java.io.tmpdir in system properties and HBase conf 2024-12-10T03:33:26,674 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T03:33:26,675 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T03:33:26,675 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T03:33:27,566 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-10T03:33:27,625 INFO [Time-limited test {}] log.Log(170): Logging initialized @2129ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-10T03:33:27,681 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T03:33:27,732 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T03:33:27,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T03:33:27,747 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T03:33:27,748 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T03:33:27,758 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T03:33:27,760 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/hadoop.log.dir/,AVAILABLE} 2024-12-10T03:33:27,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T03:33:27,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/java.io.tmpdir/jetty-localhost-44099-hadoop-hdfs-3_4_1-tests_jar-_-any-8346889889672540221/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T03:33:27,921 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:44099} 2024-12-10T03:33:27,921 INFO [Time-limited test {}] server.Server(415): Started @2426ms 2024-12-10T03:33:28,364 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T03:33:28,371 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T03:33:28,372 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T03:33:28,373 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T03:33:28,373 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T03:33:28,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/hadoop.log.dir/,AVAILABLE} 2024-12-10T03:33:28,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T03:33:28,469 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/java.io.tmpdir/jetty-localhost-40469-hadoop-hdfs-3_4_1-tests_jar-_-any-2651720010542096617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T03:33:28,470 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:40469} 2024-12-10T03:33:28,470 INFO [Time-limited test {}] server.Server(415): Started @2974ms 2024-12-10T03:33:28,514 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T03:33:29,211 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/dfs/data/data2/current/BP-727406365-172.17.0.2-1733801607156/current, will proceed with Du for space computation calculation, 2024-12-10T03:33:29,211 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/dfs/data/data1/current/BP-727406365-172.17.0.2-1733801607156/current, will proceed with Du for space computation calculation, 2024-12-10T03:33:29,235 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T03:33:29,276 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc21a6c4138d49b48 with lease ID 0xf30d3614e5c0b932: Processing first storage report for DS-f73146b4-c69b-4f1f-abb5-b7ef7c015653 from datanode DatanodeRegistration(127.0.0.1:36539, datanodeUuid=f948c425-565d-4207-99fb-2ad82d3071d6, infoPort=35539, infoSecurePort=0, ipcPort=40639, storageInfo=lv=-57;cid=testClusterID;nsid=387720282;c=1733801607156) 2024-12-10T03:33:29,277 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc21a6c4138d49b48 with lease ID 0xf30d3614e5c0b932: from storage DS-f73146b4-c69b-4f1f-abb5-b7ef7c015653 node DatanodeRegistration(127.0.0.1:36539, datanodeUuid=f948c425-565d-4207-99fb-2ad82d3071d6, infoPort=35539, infoSecurePort=0, ipcPort=40639, storageInfo=lv=-57;cid=testClusterID;nsid=387720282;c=1733801607156), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-10T03:33:29,278 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc21a6c4138d49b48 with lease ID 0xf30d3614e5c0b932: Processing first storage report for DS-fe4d9541-887b-4c29-8245-d5068af6c9a3 from datanode DatanodeRegistration(127.0.0.1:36539, datanodeUuid=f948c425-565d-4207-99fb-2ad82d3071d6, infoPort=35539, infoSecurePort=0, ipcPort=40639, storageInfo=lv=-57;cid=testClusterID;nsid=387720282;c=1733801607156) 2024-12-10T03:33:29,278 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc21a6c4138d49b48 with lease ID 0xf30d3614e5c0b932: from storage DS-fe4d9541-887b-4c29-8245-d5068af6c9a3 node DatanodeRegistration(127.0.0.1:36539, datanodeUuid=f948c425-565d-4207-99fb-2ad82d3071d6, infoPort=35539, infoSecurePort=0, ipcPort=40639, storageInfo=lv=-57;cid=testClusterID;nsid=387720282;c=1733801607156), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T03:33:29,375 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7 
2024-12-10T03:33:29,435 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/zookeeper_0, clientPort=51621, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T03:33:29,442 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51621 2024-12-10T03:33:29,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T03:33:29,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T03:33:29,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741825_1001 (size=7) 2024-12-10T03:33:30,057 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a with version=8 2024-12-10T03:33:30,057 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/hbase-staging 2024-12-10T03:33:30,159 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-10T03:33:30,380 INFO [Time-limited test {}] client.ConnectionUtils(129): master/50b9ef1c5472:0 server-side Connection retries=45 2024-12-10T03:33:30,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T03:33:30,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T03:33:30,395 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T03:33:30,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T03:33:30,396 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T03:33:30,498 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T03:33:30,546 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-10T03:33:30,554 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-10T03:33:30,557 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T03:33:30,578 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 38533 (auto-detected) 2024-12-10T03:33:30,579 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-10T03:33:30,596 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42969 2024-12-10T03:33:30,603 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T03:33:30,604 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T03:33:30,615 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:42969 connecting to ZooKeeper ensemble=127.0.0.1:51621 2024-12-10T03:33:30,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429690x0, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T03:33:30,711 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42969-0x1000e00254c0000 connected 2024-12-10T03:33:30,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T03:33:30,795 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T03:33:30,800 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T03:33:30,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42969 2024-12-10T03:33:30,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42969 2024-12-10T03:33:30,805 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42969 2024-12-10T03:33:30,806 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42969 2024-12-10T03:33:30,806 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42969 
2024-12-10T03:33:30,812 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a, hbase.cluster.distributed=false 2024-12-10T03:33:30,862 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/50b9ef1c5472:0 server-side Connection retries=45 2024-12-10T03:33:30,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T03:33:30,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T03:33:30,863 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T03:33:30,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T03:33:30,863 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T03:33:30,865 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T03:33:30,867 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T03:33:30,868 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37553 2024-12-10T03:33:30,869 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T03:33:30,874 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T03:33:30,875 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T03:33:30,877 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T03:33:30,881 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:37553 connecting to ZooKeeper ensemble=127.0.0.1:51621 2024-12-10T03:33:30,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375530x0, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T03:33:30,893 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:375530x0, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T03:33:30,893 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37553-0x1000e00254c0001 connected 2024-12-10T03:33:30,894 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T03:33:30,895 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T03:33:30,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37553 2024-12-10T03:33:30,896 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37553 2024-12-10T03:33:30,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37553 2024-12-10T03:33:30,897 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37553 2024-12-10T03:33:30,898 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37553 2024-12-10T03:33:30,900 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:30,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T03:33:30,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T03:33:30,914 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:30,914 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;50b9ef1c5472:42969 2024-12-10T03:33:30,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T03:33:30,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T03:33:30,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:30,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:30,938 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T03:33:30,939 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/50b9ef1c5472,42969,1733801610154 from backup master directory 2024-12-10T03:33:30,939 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T03:33:30,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T03:33:30,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:30,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T03:33:30,951 WARN [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T03:33:30,951 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:30,953 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T03:33:30,954 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T03:33:31,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741826_1002 (size=42) 2024-12-10T03:33:31,421 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/hbase.id with ID: 96c3a9d2-3fa0-4706-bd88-bf18c711ef04 2024-12-10T03:33:31,462 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T03:33:31,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:31,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:31,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741827_1003 (size=196) 2024-12-10T03:33:31,962 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:33:31,963 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T03:33:31,977 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:31,980 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T03:33:32,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741828_1004 (size=1189) 2024-12-10T03:33:32,429 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store 2024-12-10T03:33:32,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741829_1005 (size=34) 2024-12-10T03:33:32,852 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-10T03:33:32,853 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:32,854 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T03:33:32,854 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T03:33:32,854 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T03:33:32,855 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-12-10T03:33:32,855 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T03:33:32,855 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T03:33:32,855 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T03:33:32,857 WARN [master/50b9ef1c5472:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/.initializing 2024-12-10T03:33:32,857 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/WALs/50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:32,862 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T03:33:32,871 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=50b9ef1c5472%2C42969%2C1733801610154, suffix=, logDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/WALs/50b9ef1c5472,42969,1733801610154, archiveDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/oldWALs, maxLogs=10 2024-12-10T03:33:32,889 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/WALs/50b9ef1c5472,42969,1733801610154/50b9ef1c5472%2C42969%2C1733801610154.1733801612875, exclude list is [], retry=0 2024-12-10T03:33:32,903 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36539,DS-f73146b4-c69b-4f1f-abb5-b7ef7c015653,DISK] 2024-12-10T03:33:32,906 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-10T03:33:32,935 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/WALs/50b9ef1c5472,42969,1733801610154/50b9ef1c5472%2C42969%2C1733801610154.1733801612875 2024-12-10T03:33:32,936 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35539:35539)] 2024-12-10T03:33:32,937 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:33:32,937 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:32,940 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:32,941 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:32,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:32,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T03:33:32,992 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:32,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T03:33:32,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:32,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T03:33:32,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:33,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:33:33,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:33,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T03:33:33,004 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:33,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:33:33,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:33,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T03:33:33,009 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:33,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:33:33,013 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:33,014 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:33,022 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T03:33:33,025 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T03:33:33,029 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:33:33,030 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64820732, jitterRate=-0.034095823764801025}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T03:33:33,033 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T03:33:33,034 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T03:33:33,059 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d1330fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:33,086 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-10T03:33:33,095 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T03:33:33,095 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T03:33:33,097 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T03:33:33,099 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T03:33:33,103 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-10T03:33:33,103 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T03:33:33,125 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-10T03:33:33,135 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T03:33:33,178 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-10T03:33:33,181 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T03:33:33,182 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T03:33:33,192 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-10T03:33:33,194 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T03:33:33,199 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T03:33:33,208 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-10T03:33:33,209 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T03:33:33,220 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T03:33:33,232 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T03:33:33,241 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T03:33:33,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T03:33:33,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T03:33:33,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:33,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:33,254 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=50b9ef1c5472,42969,1733801610154, sessionid=0x1000e00254c0000, setting cluster-up flag (Was=false) 2024-12-10T03:33:33,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:33,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:33,312 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T03:33:33,316 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:33,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:33,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:33,361 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T03:33:33,363 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:33,413 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;50b9ef1c5472:37553 2024-12-10T03:33:33,414 INFO 
[RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1008): ClusterId : 96c3a9d2-3fa0-4706-bd88-bf18c711ef04 2024-12-10T03:33:33,416 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T03:33:33,427 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T03:33:33,427 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T03:33:33,433 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-10T03:33:33,438 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T03:33:33,438 DEBUG [RS:0;50b9ef1c5472:37553 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341df987, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:33,438 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-10T03:33:33,440 DEBUG [RS:0;50b9ef1c5472:37553 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64905898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=50b9ef1c5472/172.17.0.2:0 2024-12-10T03:33:33,441 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-10T03:33:33,442 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-10T03:33:33,442 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-10T03:33:33,442 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-10T03:33:33,444 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(3073): reportForDuty to master=50b9ef1c5472,42969,1733801610154 with isa=50b9ef1c5472/172.17.0.2:37553, startcode=1733801610862 2024-12-10T03:33:33,446 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 50b9ef1c5472,42969,1733801610154 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T03:33:33,450 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/50b9ef1c5472:0, corePoolSize=5, maxPoolSize=5 2024-12-10T03:33:33,450 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/50b9ef1c5472:0, corePoolSize=5, maxPoolSize=5 2024-12-10T03:33:33,450 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/50b9ef1c5472:0, corePoolSize=5, maxPoolSize=5 2024-12-10T03:33:33,450 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/50b9ef1c5472:0, corePoolSize=5, maxPoolSize=5 2024-12-10T03:33:33,451 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/50b9ef1c5472:0, corePoolSize=10, maxPoolSize=10 2024-12-10T03:33:33,451 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,451 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/50b9ef1c5472:0, corePoolSize=2, maxPoolSize=2 2024-12-10T03:33:33,451 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,454 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733801643454 2024-12-10T03:33:33,455 DEBUG [RS:0;50b9ef1c5472:37553 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T03:33:33,456 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T03:33:33,456 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T03:33:33,457 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-10T03:33:33,457 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T03:33:33,460 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:33,460 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T03:33:33,461 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T03:33:33,461 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T03:33:33,462 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T03:33:33,462 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T03:33:33,463 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-10T03:33:33,465 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T03:33:33,466 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T03:33:33,466 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T03:33:33,468 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T03:33:33,469 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T03:33:33,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741831_1007 (size=1039) 2024-12-10T03:33:33,472 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/50b9ef1c5472:0:becomeActiveMaster-HFileCleaner.large.0-1733801613470,5,FailOnTimeoutGroup] 2024-12-10T03:33:33,476 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/50b9ef1c5472:0:becomeActiveMaster-HFileCleaner.small.0-1733801613472,5,FailOnTimeoutGroup] 2024-12-10T03:33:33,476 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:33,476 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T03:33:33,480 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:33,480 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-10T03:33:33,487 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39717, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T03:33:33,492 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42969 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:33,493 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42969 {}] master.ServerManager(486): Registering regionserver=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:33,505 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:33:33,505 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33939 2024-12-10T03:33:33,505 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-10T03:33:33,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T03:33:33,517 DEBUG [RS:0;50b9ef1c5472:37553 {}] zookeeper.ZKUtil(111): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:33,517 WARN [RS:0;50b9ef1c5472:37553 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T03:33:33,518 INFO [RS:0;50b9ef1c5472:37553 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T03:33:33,518 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/WALs/50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:33,520 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [50b9ef1c5472,37553,1733801610862] 2024-12-10T03:33:33,534 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-10T03:33:33,544 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T03:33:33,554 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T03:33:33,557 INFO [RS:0;50b9ef1c5472:37553 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T03:33:33,557 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-10T03:33:33,558 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-10T03:33:33,563 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/50b9ef1c5472:0, corePoolSize=2, maxPoolSize=2 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,564 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,565 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,565 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,565 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/50b9ef1c5472:0, corePoolSize=1, maxPoolSize=1 2024-12-10T03:33:33,565 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/50b9ef1c5472:0, corePoolSize=3, maxPoolSize=3 2024-12-10T03:33:33,565 DEBUG [RS:0;50b9ef1c5472:37553 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0, corePoolSize=3, maxPoolSize=3 2024-12-10T03:33:33,566 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:33,566 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:33,566 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-10T03:33:33,566 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:33,566 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,37553,1733801610862-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T03:33:33,582 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T03:33:33,584 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,37553,1733801610862-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:33,600 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.Replication(204): 50b9ef1c5472,37553,1733801610862 started 2024-12-10T03:33:33,600 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1767): Serving as 50b9ef1c5472,37553,1733801610862, RpcServer on 50b9ef1c5472/172.17.0.2:37553, sessionid=0x1000e00254c0001 2024-12-10T03:33:33,600 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T03:33:33,601 DEBUG [RS:0;50b9ef1c5472:37553 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:33,601 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '50b9ef1c5472,37553,1733801610862' 2024-12-10T03:33:33,601 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T03:33:33,602 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T03:33:33,602 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T03:33:33,602 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T03:33:33,602 DEBUG [RS:0;50b9ef1c5472:37553 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:33,602 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '50b9ef1c5472,37553,1733801610862' 2024-12-10T03:33:33,603 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T03:33:33,603 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T03:33:33,604 DEBUG [RS:0;50b9ef1c5472:37553 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T03:33:33,604 INFO [RS:0;50b9ef1c5472:37553 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T03:33:33,604 INFO [RS:0;50b9ef1c5472:37553 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-10T03:33:33,712 INFO [RS:0;50b9ef1c5472:37553 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T03:33:33,716 INFO [RS:0;50b9ef1c5472:37553 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=50b9ef1c5472%2C37553%2C1733801610862, suffix=, logDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/WALs/50b9ef1c5472,37553,1733801610862, archiveDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/oldWALs, maxLogs=32 2024-12-10T03:33:33,731 DEBUG [RS:0;50b9ef1c5472:37553 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/WALs/50b9ef1c5472,37553,1733801610862/50b9ef1c5472%2C37553%2C1733801610862.1733801613718, exclude list is [], retry=0 2024-12-10T03:33:33,735 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36539,DS-f73146b4-c69b-4f1f-abb5-b7ef7c015653,DISK] 2024-12-10T03:33:33,740 INFO [RS:0;50b9ef1c5472:37553 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/WALs/50b9ef1c5472,37553,1733801610862/50b9ef1c5472%2C37553%2C1733801610862.1733801613718 2024-12-10T03:33:33,740 DEBUG [RS:0;50b9ef1c5472:37553 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35539:35539)] 2024-12-10T03:33:33,873 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-10T03:33:33,874 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:33:33,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741833_1009 (size=32) 2024-12-10T03:33:34,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:34,299 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T03:33:34,302 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T03:33:34,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:34,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T03:33:34,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T03:33:34,308 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T03:33:34,308 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:34,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T03:33:34,309 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T03:33:34,312 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T03:33:34,312 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:34,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T03:33:34,314 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740 2024-12-10T03:33:34,315 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740 2024-12-10T03:33:34,318 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-10T03:33:34,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T03:33:34,324 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:33:34,325 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70374595, jitterRate=0.0486631840467453}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:33:34,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T03:33:34,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T03:33:34,327 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T03:33:34,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T03:33:34,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T03:33:34,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T03:33:34,328 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-10T03:33:34,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T03:33:34,330 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T03:33:34,331 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-10T03:33:34,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T03:33:34,342 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T03:33:34,344 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T03:33:34,499 DEBUG [50b9ef1c5472:42969 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-10T03:33:34,504 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:34,509 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 50b9ef1c5472,37553,1733801610862, state=OPENING 2024-12-10T03:33:34,553 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T03:33:34,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:34,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:34,563 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T03:33:34,563 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T03:33:34,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:33:34,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:34,743 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T03:33:34,746 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T03:33:34,756 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-10T03:33:34,757 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T03:33:34,757 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T03:33:34,761 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=50b9ef1c5472%2C37553%2C1733801610862.meta, suffix=.meta, logDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/WALs/50b9ef1c5472,37553,1733801610862, archiveDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/oldWALs, maxLogs=32 2024-12-10T03:33:34,775 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/WALs/50b9ef1c5472,37553,1733801610862/50b9ef1c5472%2C37553%2C1733801610862.meta.1733801614763.meta, exclude list is [], retry=0 2024-12-10T03:33:34,779 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36539,DS-f73146b4-c69b-4f1f-abb5-b7ef7c015653,DISK] 2024-12-10T03:33:34,782 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/WALs/50b9ef1c5472,37553,1733801610862/50b9ef1c5472%2C37553%2C1733801610862.meta.1733801614763.meta 2024-12-10T03:33:34,782 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:35539:35539)] 2024-12-10T03:33:34,782 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:33:34,784 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T03:33:34,837 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T03:33:34,842 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-10T03:33:34,845 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T03:33:34,845 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:34,846 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-10T03:33:34,846 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-10T03:33:34,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T03:33:34,850 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T03:33:34,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:34,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T03:33:34,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T03:33:34,854 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T03:33:34,854 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:34,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T03:33:34,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T03:33:34,857 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T03:33:34,857 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:34,858 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T03:33:34,860 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740 2024-12-10T03:33:34,863 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740 2024-12-10T03:33:34,865 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:33:34,868 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T03:33:34,869 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59990136, jitterRate=-0.10607731342315674}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:33:34,871 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T03:33:34,877 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733801614737 2024-12-10T03:33:34,886 DEBUG [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T03:33:34,887 INFO [RS_OPEN_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-10T03:33:34,888 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:34,890 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 50b9ef1c5472,37553,1733801610862, state=OPEN 2024-12-10T03:33:34,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T03:33:34,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T03:33:34,916 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T03:33:34,916 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T03:33:34,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T03:33:34,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=50b9ef1c5472,37553,1733801610862 in 350 msec 2024-12-10T03:33:34,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T03:33:34,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 588 msec 2024-12-10T03:33:34,934 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5380 sec 2024-12-10T03:33:34,934 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733801614934, completionTime=-1 2024-12-10T03:33:34,935 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-10T03:33:34,935 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-10T03:33:34,964 DEBUG [hconnection-0x2dd385a3-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:34,966 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:34,975 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-10T03:33:34,975 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733801674975 2024-12-10T03:33:34,975 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733801734975 2024-12-10T03:33:34,975 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 40 msec 2024-12-10T03:33:35,019 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,42969,1733801610154-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:35,020 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,42969,1733801610154-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:35,020 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,42969,1733801610154-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:35,021 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-50b9ef1c5472:42969, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:35,022 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T03:33:35,028 DEBUG [master/50b9ef1c5472:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-10T03:33:35,030 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-10T03:33:35,031 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T03:33:35,036 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-10T03:33:35,038 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T03:33:35,039 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:35,041 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T03:33:35,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741835_1011 (size=358) 2024-12-10T03:33:35,458 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c325cc16a4923122e0562a36b998907c, NAME => 'hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:33:35,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741836_1012 (size=42) 2024-12-10T03:33:35,871 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:35,871 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing c325cc16a4923122e0562a36b998907c, disabling compactions & flushes 2024-12-10T03:33:35,871 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:33:35,872 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:33:35,872 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 
after waiting 0 ms 2024-12-10T03:33:35,872 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:33:35,872 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:33:35,872 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for c325cc16a4923122e0562a36b998907c: 2024-12-10T03:33:35,875 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T03:33:35,882 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733801615876"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733801615876"}]},"ts":"1733801615876"} 2024-12-10T03:33:35,901 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T03:33:35,902 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T03:33:35,905 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801615902"}]},"ts":"1733801615902"} 2024-12-10T03:33:35,908 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-10T03:33:35,955 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c325cc16a4923122e0562a36b998907c, ASSIGN}] 2024-12-10T03:33:35,957 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=c325cc16a4923122e0562a36b998907c, ASSIGN 2024-12-10T03:33:35,959 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=c325cc16a4923122e0562a36b998907c, ASSIGN; state=OFFLINE, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=false 2024-12-10T03:33:36,110 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c325cc16a4923122e0562a36b998907c, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:36,119 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure c325cc16a4923122e0562a36b998907c, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:33:36,276 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:36,289 INFO [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:33:36,290 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => c325cc16a4923122e0562a36b998907c, NAME => 'hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:33:36,290 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace c325cc16a4923122e0562a36b998907c 2024-12-10T03:33:36,290 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:36,290 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for c325cc16a4923122e0562a36b998907c 2024-12-10T03:33:36,291 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for c325cc16a4923122e0562a36b998907c 2024-12-10T03:33:36,293 INFO [StoreOpener-c325cc16a4923122e0562a36b998907c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c325cc16a4923122e0562a36b998907c 2024-12-10T03:33:36,296 INFO [StoreOpener-c325cc16a4923122e0562a36b998907c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c325cc16a4923122e0562a36b998907c columnFamilyName info 2024-12-10T03:33:36,296 DEBUG [StoreOpener-c325cc16a4923122e0562a36b998907c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:36,296 INFO [StoreOpener-c325cc16a4923122e0562a36b998907c-1 {}] regionserver.HStore(327): Store=c325cc16a4923122e0562a36b998907c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:33:36,298 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c 2024-12-10T03:33:36,299 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c 2024-12-10T03:33:36,303 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for c325cc16a4923122e0562a36b998907c 2024-12-10T03:33:36,308 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:33:36,309 INFO [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened c325cc16a4923122e0562a36b998907c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59012035, jitterRate=-0.12065215408802032}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T03:33:36,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for c325cc16a4923122e0562a36b998907c: 2024-12-10T03:33:36,313 INFO [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c., pid=6, masterSystemTime=1733801616276 2024-12-10T03:33:36,316 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:33:36,317 INFO [RS_OPEN_PRIORITY_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 
2024-12-10T03:33:36,318 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=c325cc16a4923122e0562a36b998907c, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:36,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T03:33:36,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure c325cc16a4923122e0562a36b998907c, server=50b9ef1c5472,37553,1733801610862 in 202 msec 2024-12-10T03:33:36,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T03:33:36,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=c325cc16a4923122e0562a36b998907c, ASSIGN in 371 msec 2024-12-10T03:33:36,329 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T03:33:36,330 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801616330"}]},"ts":"1733801616330"} 2024-12-10T03:33:36,332 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-10T03:33:36,372 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-10T03:33:36,372 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T03:33:36,377 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3410 sec 2024-12-10T03:33:36,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:36,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-10T03:33:36,378 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:33:36,405 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-10T03:33:36,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T03:33:36,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 37 msec 2024-12-10T03:33:36,450 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-10T03:33:36,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T03:33:36,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 31 msec 2024-12-10T03:33:36,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-10T03:33:36,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-10T03:33:36,529 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.578sec 2024-12-10T03:33:36,532 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T03:33:36,535 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T03:33:36,536 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T03:33:36,536 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T03:33:36,536 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T03:33:36,537 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,42969,1733801610154-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T03:33:36,538 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,42969,1733801610154-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T03:33:36,544 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-10T03:33:36,545 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-10T03:33:36,545 INFO [master/50b9ef1c5472:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=50b9ef1c5472,42969,1733801610154-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-10T03:33:36,625 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-12-10T03:33:36,625 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-10T03:33:36,638 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:36,641 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T03:33:36,641 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T03:33:36,651 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:36,659 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:36,666 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=50b9ef1c5472,42969,1733801610154 2024-12-10T03:33:36,679 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=98, ProcessCount=11, AvailableMemoryMB=4013 2024-12-10T03:33:36,705 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T03:33:36,710 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T03:33:36,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-10T03:33:36,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:33:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T03:33:36,728 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T03:33:36,729 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:36,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-10T03:33:36,734 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T03:33:36,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T03:33:36,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741837_1013 (size=963) 2024-12-10T03:33:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T03:33:37,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T03:33:37,148 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:33:37,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741838_1014 (size=53) 2024-12-10T03:33:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T03:33:37,562 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:37,562 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c1d70420fe32ca56286cb5d739cf351d, disabling compactions & flushes 2024-12-10T03:33:37,563 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:37,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:37,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. after waiting 0 ms 2024-12-10T03:33:37,563 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:37,563 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:37,564 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:37,568 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T03:33:37,568 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733801617568"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733801617568"}]},"ts":"1733801617568"} 2024-12-10T03:33:37,572 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T03:33:37,574 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T03:33:37,575 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801617574"}]},"ts":"1733801617574"} 2024-12-10T03:33:37,578 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T03:33:37,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1d70420fe32ca56286cb5d739cf351d, ASSIGN}] 2024-12-10T03:33:37,630 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1d70420fe32ca56286cb5d739cf351d, ASSIGN 2024-12-10T03:33:37,633 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1d70420fe32ca56286cb5d739cf351d, ASSIGN; state=OFFLINE, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=false 2024-12-10T03:33:37,784 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=c1d70420fe32ca56286cb5d739cf351d, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:37,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:33:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T03:33:37,946 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:37,959 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:37,959 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:33:37,959 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,960 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:33:37,960 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,960 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,963 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,968 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:33:37,968 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1d70420fe32ca56286cb5d739cf351d columnFamilyName A 2024-12-10T03:33:37,968 DEBUG [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:37,970 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.HStore(327): Store=c1d70420fe32ca56286cb5d739cf351d/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:33:37,970 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,973 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:33:37,973 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1d70420fe32ca56286cb5d739cf351d columnFamilyName B 2024-12-10T03:33:37,973 DEBUG [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:37,974 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.HStore(327): Store=c1d70420fe32ca56286cb5d739cf351d/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:33:37,974 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,977 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:33:37,977 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1d70420fe32ca56286cb5d739cf351d columnFamilyName C 2024-12-10T03:33:37,977 DEBUG [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:33:37,978 INFO [StoreOpener-c1d70420fe32ca56286cb5d739cf351d-1 {}] regionserver.HStore(327): Store=c1d70420fe32ca56286cb5d739cf351d/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:33:37,978 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:37,980 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,981 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,983 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:33:37,985 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:37,989 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:33:37,989 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened c1d70420fe32ca56286cb5d739cf351d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72234957, jitterRate=0.07638473808765411}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:33:37,990 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:37,992 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., pid=11, masterSystemTime=1733801617946 2024-12-10T03:33:37,994 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:37,995 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:37,995 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=c1d70420fe32ca56286cb5d739cf351d, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:38,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-10T03:33:38,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 in 208 msec 2024-12-10T03:33:38,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-10T03:33:38,005 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1d70420fe32ca56286cb5d739cf351d, ASSIGN in 373 msec 2024-12-10T03:33:38,006 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T03:33:38,006 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801618006"}]},"ts":"1733801618006"} 2024-12-10T03:33:38,008 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T03:33:38,021 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T03:33:38,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2980 sec 2024-12-10T03:33:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T03:33:38,869 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-10T03:33:38,875 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-12-10T03:33:38,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:38,925 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:38,929 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:38,933 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T03:33:38,936 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54828, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T03:33:38,943 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-12-10T03:33:38,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:38,955 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-12-10T03:33:38,963 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:38,964 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-12-10T03:33:38,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:38,978 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-12-10T03:33:38,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:38,990 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-12-10T03:33:39,005 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:39,008 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-12-10T03:33:39,021 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:39,022 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-12-10T03:33:39,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:39,037 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-12-10T03:33:39,046 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:39,048 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-12-10T03:33:39,060 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:33:39,066 DEBUG [hconnection-0x106fdee5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,068 DEBUG [hconnection-0x293c9f96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,068 DEBUG [hconnection-0x38c2ffb0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,069 DEBUG [hconnection-0x1127e63d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,069 DEBUG [hconnection-0x522bb58c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,069 DEBUG [hconnection-0x32d9f7e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,070 DEBUG [hconnection-0x624e92fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,071 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49122, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,071 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49146, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,071 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49132, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,071 DEBUG [hconnection-0x7c3cfe41-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,072 DEBUG [hconnection-0x2ce6d49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:33:39,074 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:39,075 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49162, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,077 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,081 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,081 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-10T03:33:39,085 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:33:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T03:33:39,087 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:39,089 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:39,090 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:39,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:33:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:39,143 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:39,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:39,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:39,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:39,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:39,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T03:33:39,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/df0e075df11542689ac4a29fc43a6731 is 50, key is test_row_0/A:col10/1733801619127/Put/seqid=0 2024-12-10T03:33:39,251 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T03:33:39,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:39,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741839_1015 (size=12001) 2024-12-10T03:33:39,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801679276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801679278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801679271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/df0e075df11542689ac4a29fc43a6731 2024-12-10T03:33:39,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801679284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801679286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/89e69143874a4e8f839544c7cfe019bb is 50, key is test_row_0/B:col10/1733801619127/Put/seqid=0 2024-12-10T03:33:39,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T03:33:39,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741840_1016 (size=12001) 2024-12-10T03:33:39,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/89e69143874a4e8f839544c7cfe019bb 2024-12-10T03:33:39,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801679418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801679420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801679420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801679421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801679421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,439 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T03:33:39,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2c3f7fb454644d62aad8c64e38755144 is 50, key is test_row_0/C:col10/1733801619127/Put/seqid=0 2024-12-10T03:33:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:39,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
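
Annotation (not part of the captured log): the FlushTableProcedure with pid=12 above was started when the client asked the master to flush TestAcidGuarantees ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), and pid=13 is the per-region FlushRegionProcedure it dispatched. Below is a minimal sketch of how such a flush can be requested through the public Admin API; the class name is illustrative and the quorum/port settings are assumptions taken from the ZooKeeper address 127.0.0.1:51621 that appears earlier in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumptions for a local mini-cluster; the port matches the ZK address seen above.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "51621");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush every region of the table; in this log the master
          // tracks that request as FlushTableProcedure pid=12 with subprocedure pid=13.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
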
2024-12-10T03:33:39,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741841_1017 (size=12001) 2024-12-10T03:33:39,540 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-10T03:33:39,542 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T03:33:39,543 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-10T03:33:39,610 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T03:33:39,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:39,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
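
Annotation (not part of the captured log): the Mutate calls above and below are rejected with RegionTooBusyException because the region's memstore is over the 512 K test limit while the flush is still in progress. The following is a minimal client-side sketch of retrying a put with backoff in that situation; it assumes the exception actually surfaces to the caller (the HBase client normally retries such failures internally), and the backoff values and row/column names are illustrative, taken from the "test_row_0"/A:col10 keys visible in the log.

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      // Retries a single Put a few times when the region reports it is over its memstore
      // limit, as in the RegionTooBusyException entries in this log. This is a sketch,
      // not a tuned retry policy.
      static void putWithRetry(Connection connection, Put put) throws Exception {
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {
                throw e;                     // give up after a few attempts
              }
              Thread.sleep(200L * attempt);  // illustrative linear backoff, not a tuned value
            }
          }
        }
      }

      public static void main(String[] args) {
        // Row/column naming follows what is visible in the log: row "test_row_0", family "A", qualifier "col10".
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
        // putWithRetry(connection, put) would be invoked with an open Connection to the test cluster.
      }
    }
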
2024-12-10T03:33:39,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801679628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801679626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801679629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801679629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801679635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T03:33:39,782 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T03:33:39,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:39,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2c3f7fb454644d62aad8c64e38755144 2024-12-10T03:33:39,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/df0e075df11542689ac4a29fc43a6731 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/df0e075df11542689ac4a29fc43a6731 2024-12-10T03:33:39,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/df0e075df11542689ac4a29fc43a6731, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T03:33:39,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/89e69143874a4e8f839544c7cfe019bb as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/89e69143874a4e8f839544c7cfe019bb 2024-12-10T03:33:39,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/89e69143874a4e8f839544c7cfe019bb, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T03:33:39,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2c3f7fb454644d62aad8c64e38755144 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2c3f7fb454644d62aad8c64e38755144 2024-12-10T03:33:39,937 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T03:33:39,938 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:39,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:39,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:39,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:39,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:39,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2c3f7fb454644d62aad8c64e38755144, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T03:33:39,955 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c1d70420fe32ca56286cb5d739cf351d in 823ms, sequenceid=13, compaction requested=false 2024-12-10T03:33:39,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:39,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T03:33:39,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:39,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:39,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:39,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:39,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:39,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:39,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:39,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/78d8c89704af4b15af2936c0c535197d is 50, key is test_row_0/A:col10/1733801619278/Put/seqid=0 2024-12-10T03:33:39,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741842_1018 (size=12001) 2024-12-10T03:33:39,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801679968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801679971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801679979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801679983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801679986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:39,995 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/78d8c89704af4b15af2936c0c535197d 2024-12-10T03:33:40,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/80c1555cb9a948dcad06d77143ebe700 is 50, key is test_row_0/B:col10/1733801619278/Put/seqid=0 2024-12-10T03:33:40,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741843_1019 (size=12001) 2024-12-10T03:33:40,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/80c1555cb9a948dcad06d77143ebe700 2024-12-10T03:33:40,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/0e215f817d0941638726e113e9c2e506 is 50, key is test_row_0/C:col10/1733801619278/Put/seqid=0 2024-12-10T03:33:40,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741844_1020 (size=12001) 2024-12-10T03:33:40,080 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/0e215f817d0941638726e113e9c2e506 2024-12-10T03:33:40,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801680091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/78d8c89704af4b15af2936c0c535197d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/78d8c89704af4b15af2936c0c535197d 2024-12-10T03:33:40,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801680094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,106 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,106 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T03:33:40,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:40,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:40,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:40,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:40,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:40,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/78d8c89704af4b15af2936c0c535197d, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T03:33:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801680096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801680096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801680098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/80c1555cb9a948dcad06d77143ebe700 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/80c1555cb9a948dcad06d77143ebe700 2024-12-10T03:33:40,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/80c1555cb9a948dcad06d77143ebe700, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T03:33:40,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/0e215f817d0941638726e113e9c2e506 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0e215f817d0941638726e113e9c2e506 2024-12-10T03:33:40,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0e215f817d0941638726e113e9c2e506, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T03:33:40,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for c1d70420fe32ca56286cb5d739cf351d in 184ms, sequenceid=41, compaction requested=false 2024-12-10T03:33:40,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:40,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T03:33:40,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T03:33:40,262 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:40,262 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T03:33:40,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:40,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:40,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:40,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:40,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:40,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:40,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/17eec1e7013246c49b384251cef36489 is 50, key is test_row_0/A:col10/1733801619970/Put/seqid=0 2024-12-10T03:33:40,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741845_1021 (size=12001) 2024-12-10T03:33:40,290 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/17eec1e7013246c49b384251cef36489 2024-12-10T03:33:40,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c4aef55db7cf4d428f00c29042545cc9 is 50, key is test_row_0/B:col10/1733801619970/Put/seqid=0 2024-12-10T03:33:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:40,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
as already flushing 2024-12-10T03:33:40,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741846_1022 (size=12001) 2024-12-10T03:33:40,332 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c4aef55db7cf4d428f00c29042545cc9 2024-12-10T03:33:40,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a257cffc46404edf883f4df647410df6 is 50, key is test_row_0/C:col10/1733801619970/Put/seqid=0 2024-12-10T03:33:40,357 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T03:33:40,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801680384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801680386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801680394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741847_1023 (size=12001) 2024-12-10T03:33:40,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801680397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801680399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801680499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801680499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801680503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801680527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801680527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,543 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T03:33:40,544 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-10T03:33:40,546 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-10T03:33:40,546 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-10T03:33:40,547 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T03:33:40,547 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-10T03:33:40,547 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-10T03:33:40,548 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-10T03:33:40,549 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T03:33:40,549 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-10T03:33:40,718 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801680708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801680707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801680720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801680736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801680738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:40,812 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a257cffc46404edf883f4df647410df6 2024-12-10T03:33:40,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/17eec1e7013246c49b384251cef36489 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/17eec1e7013246c49b384251cef36489 2024-12-10T03:33:40,842 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/17eec1e7013246c49b384251cef36489, entries=150, sequenceid=49, filesize=11.7 K 2024-12-10T03:33:40,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c4aef55db7cf4d428f00c29042545cc9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c4aef55db7cf4d428f00c29042545cc9 2024-12-10T03:33:40,856 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c4aef55db7cf4d428f00c29042545cc9, entries=150, sequenceid=49, filesize=11.7 K 2024-12-10T03:33:40,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a257cffc46404edf883f4df647410df6 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a257cffc46404edf883f4df647410df6 2024-12-10T03:33:40,881 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a257cffc46404edf883f4df647410df6, entries=150, sequenceid=49, filesize=11.7 K 2024-12-10T03:33:40,883 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for c1d70420fe32ca56286cb5d739cf351d in 621ms, sequenceid=49, compaction requested=true 2024-12-10T03:33:40,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:40,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:40,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-10T03:33:40,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-10T03:33:40,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-10T03:33:40,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7950 sec 2024-12-10T03:33:40,896 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.8160 sec 2024-12-10T03:33:41,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:41,032 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-10T03:33:41,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:41,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:41,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:41,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:41,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:41,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:41,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/bc8da914fca4457c92e7983a5fc00fa1 is 50, key is test_row_0/A:col10/1733801620394/Put/seqid=0 2024-12-10T03:33:41,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801681034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801681036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801681046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801681048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801681049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741848_1024 (size=12001) 2024-12-10T03:33:41,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/bc8da914fca4457c92e7983a5fc00fa1 2024-12-10T03:33:41,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/e145990b64fc475d97193ab61b9a0802 is 50, key is test_row_0/B:col10/1733801620394/Put/seqid=0 2024-12-10T03:33:41,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741849_1025 (size=12001) 2024-12-10T03:33:41,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801681151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801681151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801681157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T03:33:41,196 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-10T03:33:41,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:41,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-10T03:33:41,205 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:41,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T03:33:41,207 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:41,207 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:41,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T03:33:41,360 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T03:33:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:41,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:41,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:41,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801681360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801681361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801681363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T03:33:41,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/e145990b64fc475d97193ab61b9a0802 2024-12-10T03:33:41,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T03:33:41,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:41,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:41,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:41,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:41,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:41,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:41,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/10145fd8dfa54a8193c61307dff51db6 is 50, key is test_row_0/C:col10/1733801620394/Put/seqid=0 2024-12-10T03:33:41,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801681559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801681559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741850_1026 (size=12001) 2024-12-10T03:33:41,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/10145fd8dfa54a8193c61307dff51db6 2024-12-10T03:33:41,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/bc8da914fca4457c92e7983a5fc00fa1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/bc8da914fca4457c92e7983a5fc00fa1 2024-12-10T03:33:41,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/bc8da914fca4457c92e7983a5fc00fa1, entries=150, sequenceid=80, filesize=11.7 K 2024-12-10T03:33:41,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/e145990b64fc475d97193ab61b9a0802 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e145990b64fc475d97193ab61b9a0802 2024-12-10T03:33:41,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e145990b64fc475d97193ab61b9a0802, entries=150, sequenceid=80, filesize=11.7 K 2024-12-10T03:33:41,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/10145fd8dfa54a8193c61307dff51db6 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/10145fd8dfa54a8193c61307dff51db6 2024-12-10T03:33:41,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/10145fd8dfa54a8193c61307dff51db6, entries=150, sequenceid=80, filesize=11.7 K 2024-12-10T03:33:41,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for c1d70420fe32ca56286cb5d739cf351d in 607ms, sequenceid=80, compaction requested=true 2024-12-10T03:33:41,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:41,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:41,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:41,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:41,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:41,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:41,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:41,642 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:33:41,642 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:33:41,647 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:33:41,648 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:41,648 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:41,649 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/89e69143874a4e8f839544c7cfe019bb, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/80c1555cb9a948dcad06d77143ebe700, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c4aef55db7cf4d428f00c29042545cc9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e145990b64fc475d97193ab61b9a0802] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=46.9 K 2024-12-10T03:33:41,650 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 89e69143874a4e8f839544c7cfe019bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733801619101 2024-12-10T03:33:41,650 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:33:41,651 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:41,651 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:41,651 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 80c1555cb9a948dcad06d77143ebe700, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733801619278 2024-12-10T03:33:41,651 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/df0e075df11542689ac4a29fc43a6731, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/78d8c89704af4b15af2936c0c535197d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/17eec1e7013246c49b384251cef36489, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/bc8da914fca4457c92e7983a5fc00fa1] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=46.9 K 2024-12-10T03:33:41,652 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c4aef55db7cf4d428f00c29042545cc9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801619970 2024-12-10T03:33:41,652 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting df0e075df11542689ac4a29fc43a6731, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733801619101 2024-12-10T03:33:41,653 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting e145990b64fc475d97193ab61b9a0802, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733801620394 2024-12-10T03:33:41,654 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78d8c89704af4b15af2936c0c535197d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733801619278 2024-12-10T03:33:41,657 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17eec1e7013246c49b384251cef36489, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801619970 2024-12-10T03:33:41,659 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc8da914fca4457c92e7983a5fc00fa1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733801620394 2024-12-10T03:33:41,676 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T03:33:41,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:41,677 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-10T03:33:41,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:41,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:41,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:41,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:41,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:41,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:41,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/069b84c7755b41b0bd78b0538f7a2c4b is 50, key is test_row_1/A:col10/1733801621046/Put/seqid=0 2024-12-10T03:33:41,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:41,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:41,709 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#13 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:41,710 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/5aa08b6dd5004d8080d47788a72cb796 is 50, key is test_row_0/B:col10/1733801620394/Put/seqid=0 2024-12-10T03:33:41,721 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#14 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:41,722 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/ddd099b582b74bd29a8bc7e3427c84f0 is 50, key is test_row_0/A:col10/1733801620394/Put/seqid=0 2024-12-10T03:33:41,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741852_1028 (size=12139) 2024-12-10T03:33:41,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741851_1027 (size=7315) 2024-12-10T03:33:41,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741853_1029 (size=12139) 2024-12-10T03:33:41,758 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/ddd099b582b74bd29a8bc7e3427c84f0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/ddd099b582b74bd29a8bc7e3427c84f0 2024-12-10T03:33:41,779 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into ddd099b582b74bd29a8bc7e3427c84f0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:41,779 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:41,779 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=12, startTime=1733801621641; duration=0sec 2024-12-10T03:33:41,781 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:41,781 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:41,781 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:33:41,784 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:33:41,784 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:41,784 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:41,785 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2c3f7fb454644d62aad8c64e38755144, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0e215f817d0941638726e113e9c2e506, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a257cffc46404edf883f4df647410df6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/10145fd8dfa54a8193c61307dff51db6] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=46.9 K 2024-12-10T03:33:41,786 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c3f7fb454644d62aad8c64e38755144, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733801619101 2024-12-10T03:33:41,789 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e215f817d0941638726e113e9c2e506, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733801619278 2024-12-10T03:33:41,790 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a257cffc46404edf883f4df647410df6, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801619970 2024-12-10T03:33:41,791 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10145fd8dfa54a8193c61307dff51db6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733801620394 2024-12-10T03:33:41,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T03:33:41,830 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#15 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:41,832 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7ffa5c48975840fc97d3005b2739bd93 is 50, key is test_row_0/C:col10/1733801620394/Put/seqid=0 2024-12-10T03:33:41,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741854_1030 (size=12139) 2024-12-10T03:33:41,858 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7ffa5c48975840fc97d3005b2739bd93 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ffa5c48975840fc97d3005b2739bd93 2024-12-10T03:33:41,877 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 7ffa5c48975840fc97d3005b2739bd93(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:41,877 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:41,878 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=12, startTime=1733801621642; duration=0sec 2024-12-10T03:33:41,878 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:41,878 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:41,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801681868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801681868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,882 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801681872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801681983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801681983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:41,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801681984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,137 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/069b84c7755b41b0bd78b0538f7a2c4b 2024-12-10T03:33:42,160 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/5aa08b6dd5004d8080d47788a72cb796 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5aa08b6dd5004d8080d47788a72cb796 2024-12-10T03:33:42,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/808f0196a774491a9e3076cfddab9493 is 50, key is test_row_1/B:col10/1733801621046/Put/seqid=0 2024-12-10T03:33:42,177 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into 5aa08b6dd5004d8080d47788a72cb796(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:42,177 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:42,177 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=12, startTime=1733801621642; duration=0sec 2024-12-10T03:33:42,178 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:42,178 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:42,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801682191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801682192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801682192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741855_1031 (size=7315) 2024-12-10T03:33:42,198 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/808f0196a774491a9e3076cfddab9493 2024-12-10T03:33:42,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5982bfb2978d419e8cc20e13ba4779d4 is 50, key is test_row_1/C:col10/1733801621046/Put/seqid=0 2024-12-10T03:33:42,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741856_1032 (size=7315) 2024-12-10T03:33:42,244 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5982bfb2978d419e8cc20e13ba4779d4 2024-12-10T03:33:42,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/069b84c7755b41b0bd78b0538f7a2c4b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/069b84c7755b41b0bd78b0538f7a2c4b 2024-12-10T03:33:42,268 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/069b84c7755b41b0bd78b0538f7a2c4b, entries=50, sequenceid=85, filesize=7.1 K 2024-12-10T03:33:42,270 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/808f0196a774491a9e3076cfddab9493 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/808f0196a774491a9e3076cfddab9493 2024-12-10T03:33:42,283 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/808f0196a774491a9e3076cfddab9493, entries=50, sequenceid=85, filesize=7.1 K 2024-12-10T03:33:42,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5982bfb2978d419e8cc20e13ba4779d4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5982bfb2978d419e8cc20e13ba4779d4 2024-12-10T03:33:42,301 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5982bfb2978d419e8cc20e13ba4779d4, entries=50, sequenceid=85, filesize=7.1 K 2024-12-10T03:33:42,303 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=194.56 KB/199230 for c1d70420fe32ca56286cb5d739cf351d in 625ms, sequenceid=85, compaction requested=false 2024-12-10T03:33:42,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:42,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:42,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-10T03:33:42,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-10T03:33:42,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-10T03:33:42,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0990 sec 2024-12-10T03:33:42,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T03:33:42,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.1120 sec 2024-12-10T03:33:42,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:42,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=201.27 KB heapSize=528.09 KB 2024-12-10T03:33:42,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:42,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:42,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:42,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:42,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:42,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:42,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801682502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801682503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801682506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/51a1cf09f9ae4aa6a4f522c188937e49 is 50, key is test_row_0/A:col10/1733801622501/Put/seqid=0 2024-12-10T03:33:42,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741857_1033 (size=12001) 2024-12-10T03:33:42,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.56 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/51a1cf09f9ae4aa6a4f522c188937e49 2024-12-10T03:33:42,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/00d5bf6baaf34081a044b60f21978e16 is 50, key is test_row_0/B:col10/1733801622501/Put/seqid=0 2024-12-10T03:33:42,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801682564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801682571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741858_1034 (size=12001) 2024-12-10T03:33:42,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801682608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801682609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801682612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801682811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801682812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:42,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801682818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:42,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.56 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/00d5bf6baaf34081a044b60f21978e16 2024-12-10T03:33:43,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/92805d685a9b462da6138665c5eb52da is 50, key is test_row_0/C:col10/1733801622501/Put/seqid=0 2024-12-10T03:33:43,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741859_1035 (size=12001) 2024-12-10T03:33:43,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801683118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801683116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801683122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T03:33:43,312 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-10T03:33:43,315 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:43,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-10T03:33:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T03:33:43,318 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:43,319 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:43,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:43,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T03:33:43,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.56 KB at sequenceid=123 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/92805d685a9b462da6138665c5eb52da 2024-12-10T03:33:43,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/51a1cf09f9ae4aa6a4f522c188937e49 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/51a1cf09f9ae4aa6a4f522c188937e49 2024-12-10T03:33:43,472 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T03:33:43,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:43,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:43,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:43,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:43,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:43,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:43,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/51a1cf09f9ae4aa6a4f522c188937e49, entries=150, sequenceid=123, filesize=11.7 K 2024-12-10T03:33:43,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/00d5bf6baaf34081a044b60f21978e16 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/00d5bf6baaf34081a044b60f21978e16 2024-12-10T03:33:43,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/00d5bf6baaf34081a044b60f21978e16, entries=150, sequenceid=123, filesize=11.7 K 2024-12-10T03:33:43,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/92805d685a9b462da6138665c5eb52da as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/92805d685a9b462da6138665c5eb52da 2024-12-10T03:33:43,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/92805d685a9b462da6138665c5eb52da, entries=150, sequenceid=123, filesize=11.7 K 2024-12-10T03:33:43,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~214.69 KB/219840, heapSize ~563.20 KB/576720, currentSize=0 B/0 for c1d70420fe32ca56286cb5d739cf351d in 1012ms, sequenceid=123, compaction requested=true 2024-12-10T03:33:43,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:43,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:43,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:43,514 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:43,514 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:43,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:43,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:43,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:43,514 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:43,516 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31455 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:43,516 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:43,516 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:43,517 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5aa08b6dd5004d8080d47788a72cb796, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/808f0196a774491a9e3076cfddab9493, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/00d5bf6baaf34081a044b60f21978e16] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=30.7 K 2024-12-10T03:33:43,517 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31455 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:43,517 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:43,517 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:43,517 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/ddd099b582b74bd29a8bc7e3427c84f0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/069b84c7755b41b0bd78b0538f7a2c4b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/51a1cf09f9ae4aa6a4f522c188937e49] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=30.7 K 2024-12-10T03:33:43,518 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aa08b6dd5004d8080d47788a72cb796, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733801620394 2024-12-10T03:33:43,519 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 808f0196a774491a9e3076cfddab9493, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733801621046 2024-12-10T03:33:43,519 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddd099b582b74bd29a8bc7e3427c84f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733801620394 2024-12-10T03:33:43,519 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 00d5bf6baaf34081a044b60f21978e16, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733801621830 2024-12-10T03:33:43,519 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 069b84c7755b41b0bd78b0538f7a2c4b, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733801621046 2024-12-10T03:33:43,522 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51a1cf09f9ae4aa6a4f522c188937e49, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733801621830 2024-12-10T03:33:43,540 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#21 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:43,542 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/9143b97d11674514804c851fb8bc4504 is 50, key is test_row_0/A:col10/1733801622501/Put/seqid=0 2024-12-10T03:33:43,544 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#22 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:43,545 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/bf187ee731e54de88f1b6a8575e1ce29 is 50, key is test_row_0/B:col10/1733801622501/Put/seqid=0 2024-12-10T03:33:43,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741861_1037 (size=12241) 2024-12-10T03:33:43,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741860_1036 (size=12241) 2024-12-10T03:33:43,581 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/bf187ee731e54de88f1b6a8575e1ce29 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bf187ee731e54de88f1b6a8575e1ce29 2024-12-10T03:33:43,597 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into bf187ee731e54de88f1b6a8575e1ce29(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:43,597 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:43,597 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801623514; duration=0sec 2024-12-10T03:33:43,598 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:43,598 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:43,598 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:43,600 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31455 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:43,601 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:43,601 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:43,601 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ffa5c48975840fc97d3005b2739bd93, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5982bfb2978d419e8cc20e13ba4779d4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/92805d685a9b462da6138665c5eb52da] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=30.7 K 2024-12-10T03:33:43,602 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ffa5c48975840fc97d3005b2739bd93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1733801620394 2024-12-10T03:33:43,603 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5982bfb2978d419e8cc20e13ba4779d4, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1733801621046 2024-12-10T03:33:43,604 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 92805d685a9b462da6138665c5eb52da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733801621830 2024-12-10T03:33:43,620 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#23 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T03:33:43,621 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2542fff02c3e499293e5671c2f899de7 is 50, key is test_row_0/C:col10/1733801622501/Put/seqid=0 2024-12-10T03:33:43,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T03:33:43,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:43,628 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-10T03:33:43,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:43,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:43,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/77c819587a3740e391ca45a80ace6cb4 is 50, key is test_row_0/A:col10/1733801623625/Put/seqid=0 2024-12-10T03:33:43,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:43,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741862_1038 (size=12241) 2024-12-10T03:33:43,661 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2542fff02c3e499293e5671c2f899de7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2542fff02c3e499293e5671c2f899de7 2024-12-10T03:33:43,675 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 2542fff02c3e499293e5671c2f899de7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:43,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:43,675 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801623514; duration=0sec 2024-12-10T03:33:43,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:43,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:43,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741863_1039 (size=7315) 2024-12-10T03:33:43,681 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/77c819587a3740e391ca45a80ace6cb4 2024-12-10T03:33:43,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d709e98dce944ff3bed776361880c4aa is 50, key is test_row_0/B:col10/1733801623625/Put/seqid=0 2024-12-10T03:33:43,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801683701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801683704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,708 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801683707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741864_1040 (size=7315) 2024-12-10T03:33:43,727 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d709e98dce944ff3bed776361880c4aa 2024-12-10T03:33:43,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/ecfaad489a31499dba090e247cd2a1e8 is 50, key is test_row_0/C:col10/1733801623625/Put/seqid=0 2024-12-10T03:33:43,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741865_1041 (size=7315) 2024-12-10T03:33:43,767 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/ecfaad489a31499dba090e247cd2a1e8 2024-12-10T03:33:43,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/77c819587a3740e391ca45a80ace6cb4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/77c819587a3740e391ca45a80ace6cb4 2024-12-10T03:33:43,788 INFO 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/77c819587a3740e391ca45a80ace6cb4, entries=50, sequenceid=128, filesize=7.1 K 2024-12-10T03:33:43,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d709e98dce944ff3bed776361880c4aa as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d709e98dce944ff3bed776361880c4aa 2024-12-10T03:33:43,800 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d709e98dce944ff3bed776361880c4aa, entries=50, sequenceid=128, filesize=7.1 K 2024-12-10T03:33:43,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/ecfaad489a31499dba090e247cd2a1e8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/ecfaad489a31499dba090e247cd2a1e8 2024-12-10T03:33:43,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801683806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801683810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:43,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801683810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:43,818 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/ecfaad489a31499dba090e247cd2a1e8, entries=50, sequenceid=128, filesize=7.1 K 2024-12-10T03:33:43,821 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=201.27 KB/206100 for c1d70420fe32ca56286cb5d739cf351d in 194ms, sequenceid=128, compaction requested=false 2024-12-10T03:33:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-10T03:33:43,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-10T03:33:43,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-10T03:33:43,827 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 504 msec 2024-12-10T03:33:43,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 513 msec 2024-12-10T03:33:43,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T03:33:43,922 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-10T03:33:43,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:43,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-10T03:33:43,929 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:43,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T03:33:43,931 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:43,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:43,980 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/9143b97d11674514804c851fb8bc4504 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9143b97d11674514804c851fb8bc4504 2024-12-10T03:33:43,994 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 9143b97d11674514804c851fb8bc4504(size=12.0 K), total size for store is 19.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:43,994 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:43,995 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801623513; duration=0sec 2024-12-10T03:33:43,995 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:43,995 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:44,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:44,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=201.27 KB heapSize=528.09 KB 2024-12-10T03:33:44,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:44,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801684013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801684013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:44,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:44,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801684014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/3186e7dd80c04f13afdb9f0e3de63c0a is 50, key is test_row_0/A:col10/1733801623702/Put/seqid=0 2024-12-10T03:33:44,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T03:33:44,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741866_1042 (size=12151) 2024-12-10T03:33:44,084 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T03:33:44,240 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,241 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:44,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:44,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801684317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801684318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801684318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,395 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:44,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:44,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/3186e7dd80c04f13afdb9f0e3de63c0a 2024-12-10T03:33:44,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c45a7e6cd8594885ab6f9f2451b66c93 is 50, key is test_row_0/B:col10/1733801623702/Put/seqid=0 2024-12-10T03:33:44,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741867_1043 (size=12151) 2024-12-10T03:33:44,464 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c45a7e6cd8594885ab6f9f2451b66c93 2024-12-10T03:33:44,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/0d56c109a7884bfcb5277722873e444a is 50, key is test_row_0/C:col10/1733801623702/Put/seqid=0 2024-12-10T03:33:44,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741868_1044 (size=12151) 2024-12-10T03:33:44,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/0d56c109a7884bfcb5277722873e444a 2024-12-10T03:33:44,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/3186e7dd80c04f13afdb9f0e3de63c0a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/3186e7dd80c04f13afdb9f0e3de63c0a 2024-12-10T03:33:44,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/3186e7dd80c04f13afdb9f0e3de63c0a, entries=150, sequenceid=163, filesize=11.9 K 2024-12-10T03:33:44,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c45a7e6cd8594885ab6f9f2451b66c93 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c45a7e6cd8594885ab6f9f2451b66c93 2024-12-10T03:33:44,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T03:33:44,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c45a7e6cd8594885ab6f9f2451b66c93, entries=150, sequenceid=163, filesize=11.9 K 2024-12-10T03:33:44,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/0d56c109a7884bfcb5277722873e444a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0d56c109a7884bfcb5277722873e444a 2024-12-10T03:33:44,548 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:44,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:44,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0d56c109a7884bfcb5277722873e444a, entries=150, sequenceid=163, filesize=11.9 K 2024-12-10T03:33:44,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~201.27 KB/206100, heapSize ~528.05 KB/540720, currentSize=0 B/0 for c1d70420fe32ca56286cb5d739cf351d in 542ms, sequenceid=163, compaction requested=true 2024-12-10T03:33:44,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:44,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:44,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:44,557 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:44,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:44,557 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:44,557 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-10T03:33:44,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:44,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:44,559 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:44,559 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:44,559 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:44,559 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:44,559 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,559 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:44,559 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9143b97d11674514804c851fb8bc4504, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/77c819587a3740e391ca45a80ace6cb4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/3186e7dd80c04f13afdb9f0e3de63c0a] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=31.0 K 2024-12-10T03:33:44,559 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bf187ee731e54de88f1b6a8575e1ce29, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d709e98dce944ff3bed776361880c4aa, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c45a7e6cd8594885ab6f9f2451b66c93] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=31.0 K 2024-12-10T03:33:44,560 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9143b97d11674514804c851fb8bc4504, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733801621830 2024-12-10T03:33:44,560 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting bf187ee731e54de88f1b6a8575e1ce29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733801621830 2024-12-10T03:33:44,561 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77c819587a3740e391ca45a80ace6cb4, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733801623625 2024-12-10T03:33:44,561 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d709e98dce944ff3bed776361880c4aa, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733801623625 2024-12-10T03:33:44,562 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3186e7dd80c04f13afdb9f0e3de63c0a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801623696 2024-12-10T03:33:44,562 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c45a7e6cd8594885ab6f9f2451b66c93, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801623696 2024-12-10T03:33:44,579 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:44,580 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/06f9b82e0ecb457282078b8da0d7f577 is 50, key is test_row_0/A:col10/1733801623702/Put/seqid=0 2024-12-10T03:33:44,588 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#31 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:44,589 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1bd5f830b5744c6fa9a93245559ddbcc is 50, key is test_row_0/B:col10/1733801623702/Put/seqid=0 2024-12-10T03:33:44,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741869_1045 (size=12493) 2024-12-10T03:33:44,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:44,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:33:44,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:44,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:44,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:44,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,609 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/06f9b82e0ecb457282078b8da0d7f577 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/06f9b82e0ecb457282078b8da0d7f577 2024-12-10T03:33:44,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6cb9809decda4d2eb5543554ebe5889f is 50, key is test_row_0/A:col10/1733801624595/Put/seqid=0 2024-12-10T03:33:44,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36539 is added to blk_1073741870_1046 (size=12493) 2024-12-10T03:33:44,623 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 06f9b82e0ecb457282078b8da0d7f577(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:44,623 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:44,623 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801624557; duration=0sec 2024-12-10T03:33:44,624 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:44,624 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:44,624 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:44,626 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:44,626 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:44,626 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:44,627 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2542fff02c3e499293e5671c2f899de7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/ecfaad489a31499dba090e247cd2a1e8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0d56c109a7884bfcb5277722873e444a] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=31.0 K 2024-12-10T03:33:44,627 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1bd5f830b5744c6fa9a93245559ddbcc as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1bd5f830b5744c6fa9a93245559ddbcc 2024-12-10T03:33:44,628 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2542fff02c3e499293e5671c2f899de7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733801621830 2024-12-10T03:33:44,628 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecfaad489a31499dba090e247cd2a1e8, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733801623625 2024-12-10T03:33:44,629 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d56c109a7884bfcb5277722873e444a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801623696 2024-12-10T03:33:44,643 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into 1bd5f830b5744c6fa9a93245559ddbcc(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:44,643 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:44,643 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801624557; duration=0sec 2024-12-10T03:33:44,643 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:44,643 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:44,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741871_1047 (size=14541) 2024-12-10T03:33:44,665 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#33 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:44,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6cb9809decda4d2eb5543554ebe5889f 2024-12-10T03:33:44,666 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/893546d4f53849488c0a3b16058d1b38 is 50, key is test_row_0/C:col10/1733801623702/Put/seqid=0 2024-12-10T03:33:44,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741872_1048 (size=12493) 2024-12-10T03:33:44,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d75abf6e0b054bc28767eab3ccde9f7d is 50, key is test_row_0/B:col10/1733801624595/Put/seqid=0 2024-12-10T03:33:44,703 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:44,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
as already flushing 2024-12-10T03:33:44,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:44,704 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/893546d4f53849488c0a3b16058d1b38 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/893546d4f53849488c0a3b16058d1b38 2024-12-10T03:33:44,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741873_1049 (size=12151) 2024-12-10T03:33:44,718 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 893546d4f53849488c0a3b16058d1b38(size=12.2 K), total size for store is 12.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:44,718 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:44,718 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801624557; duration=0sec 2024-12-10T03:33:44,718 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:44,718 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:44,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d75abf6e0b054bc28767eab3ccde9f7d 2024-12-10T03:33:44,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/16e651a03db645dd8c827759bfd739c5 is 50, key is test_row_0/C:col10/1733801624595/Put/seqid=0 2024-12-10T03:33:44,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801684733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801684733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741874_1050 (size=12151) 2024-12-10T03:33:44,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/16e651a03db645dd8c827759bfd739c5 2024-12-10T03:33:44,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6cb9809decda4d2eb5543554ebe5889f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6cb9809decda4d2eb5543554ebe5889f 2024-12-10T03:33:44,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6cb9809decda4d2eb5543554ebe5889f, entries=200, sequenceid=175, filesize=14.2 K 2024-12-10T03:33:44,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d75abf6e0b054bc28767eab3ccde9f7d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d75abf6e0b054bc28767eab3ccde9f7d 2024-12-10T03:33:44,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d75abf6e0b054bc28767eab3ccde9f7d, entries=150, sequenceid=175, filesize=11.9 K 2024-12-10T03:33:44,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/16e651a03db645dd8c827759bfd739c5 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/16e651a03db645dd8c827759bfd739c5 2024-12-10T03:33:44,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/16e651a03db645dd8c827759bfd739c5, entries=150, sequenceid=175, filesize=11.9 K 2024-12-10T03:33:44,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c1d70420fe32ca56286cb5d739cf351d in 212ms, sequenceid=175, compaction requested=false 2024-12-10T03:33:44,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:44,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:44,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T03:33:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:44,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:44,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/baa87e3b8bcc4822820dfedc78654dc9 is 50, key is test_row_0/A:col10/1733801624730/Put/seqid=0 2024-12-10T03:33:44,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801684837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801684838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801684839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801684841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801684844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741875_1051 (size=12151) 2024-12-10T03:33:44,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:44,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:44,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:44,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:44,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801684943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801684943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801684948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:44,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:44,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801684948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:45,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T03:33:45,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801685044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801685148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801685148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801685153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801685153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,180 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:45,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/baa87e3b8bcc4822820dfedc78654dc9 2024-12-10T03:33:45,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/4a54ce9331d341a2a354ff4f96aae174 is 50, key is test_row_0/B:col10/1733801624730/Put/seqid=0 2024-12-10T03:33:45,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741876_1052 (size=12151) 2024-12-10T03:33:45,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/4a54ce9331d341a2a354ff4f96aae174 2024-12-10T03:33:45,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/469a63abb87d46f99532150e14585831 is 50, key is test_row_0/C:col10/1733801624730/Put/seqid=0 2024-12-10T03:33:45,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741877_1053 (size=12151) 2024-12-10T03:33:45,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=204 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/469a63abb87d46f99532150e14585831 2024-12-10T03:33:45,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:45,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:45,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/baa87e3b8bcc4822820dfedc78654dc9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/baa87e3b8bcc4822820dfedc78654dc9 2024-12-10T03:33:45,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801685348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/baa87e3b8bcc4822820dfedc78654dc9, entries=150, sequenceid=204, filesize=11.9 K 2024-12-10T03:33:45,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/4a54ce9331d341a2a354ff4f96aae174 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4a54ce9331d341a2a354ff4f96aae174 2024-12-10T03:33:45,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4a54ce9331d341a2a354ff4f96aae174, entries=150, sequenceid=204, filesize=11.9 K 2024-12-10T03:33:45,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/469a63abb87d46f99532150e14585831 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/469a63abb87d46f99532150e14585831 2024-12-10T03:33:45,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/469a63abb87d46f99532150e14585831, entries=150, sequenceid=204, filesize=11.9 K 2024-12-10T03:33:45,373 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for c1d70420fe32ca56286cb5d739cf351d in 551ms, sequenceid=204, compaction requested=true 2024-12-10T03:33:45,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:45,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,373 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:45,373 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:45,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:45,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:45,374 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:45,375 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:45,375 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:45,375 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:45,375 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1bd5f830b5744c6fa9a93245559ddbcc, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d75abf6e0b054bc28767eab3ccde9f7d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4a54ce9331d341a2a354ff4f96aae174] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=35.9 K 2024-12-10T03:33:45,376 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:45,376 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:45,376 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,376 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/06f9b82e0ecb457282078b8da0d7f577, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6cb9809decda4d2eb5543554ebe5889f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/baa87e3b8bcc4822820dfedc78654dc9] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=38.3 K 2024-12-10T03:33:45,376 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bd5f830b5744c6fa9a93245559ddbcc, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801623696 2024-12-10T03:33:45,377 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06f9b82e0ecb457282078b8da0d7f577, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801623696 2024-12-10T03:33:45,377 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d75abf6e0b054bc28767eab3ccde9f7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733801624595 2024-12-10T03:33:45,377 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cb9809decda4d2eb5543554ebe5889f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733801624591 2024-12-10T03:33:45,379 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting baa87e3b8bcc4822820dfedc78654dc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733801624730 2024-12-10T03:33:45,379 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a54ce9331d341a2a354ff4f96aae174, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733801624730 2024-12-10T03:33:45,396 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:45,399 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/1cbaa9a6a4f446f687dce25ee8eb1dda is 50, key is test_row_0/A:col10/1733801624730/Put/seqid=0 2024-12-10T03:33:45,402 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:45,402 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/bc5a8c45cab74379a6ec5ca8adc30b82 is 50, key is test_row_0/B:col10/1733801624730/Put/seqid=0 2024-12-10T03:33:45,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741878_1054 (size=12595) 2024-12-10T03:33:45,432 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/1cbaa9a6a4f446f687dce25ee8eb1dda as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1cbaa9a6a4f446f687dce25ee8eb1dda 2024-12-10T03:33:45,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741879_1055 (size=12595) 2024-12-10T03:33:45,445 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/bc5a8c45cab74379a6ec5ca8adc30b82 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bc5a8c45cab74379a6ec5ca8adc30b82 2024-12-10T03:33:45,447 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 1cbaa9a6a4f446f687dce25ee8eb1dda(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:45,447 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,447 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801625373; duration=0sec 2024-12-10T03:33:45,447 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:45,447 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:45,447 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:45,450 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:45,450 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:45,450 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:45,451 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/893546d4f53849488c0a3b16058d1b38, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/16e651a03db645dd8c827759bfd739c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/469a63abb87d46f99532150e14585831] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=35.9 K 2024-12-10T03:33:45,452 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 893546d4f53849488c0a3b16058d1b38, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801623696 2024-12-10T03:33:45,454 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16e651a03db645dd8c827759bfd739c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733801624595 2024-12-10T03:33:45,455 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 469a63abb87d46f99532150e14585831, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733801624730 2024-12-10T03:33:45,459 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into bc5a8c45cab74379a6ec5ca8adc30b82(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:45,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:45,459 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:33:45,459 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801625373; duration=0sec 2024-12-10T03:33:45,460 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:45,460 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:45,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/327f95038a4b4f30806aaba66afb0827 is 50, key is test_row_0/A:col10/1733801625457/Put/seqid=0 2024-12-10T03:33:45,479 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:45,480 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5da0795fc09a4e18b68a8e1551938e64 is 50, key is test_row_0/C:col10/1733801624730/Put/seqid=0 2024-12-10T03:33:45,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741880_1056 (size=12151) 2024-12-10T03:33:45,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/327f95038a4b4f30806aaba66afb0827 2024-12-10T03:33:45,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:45,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:45,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:45,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741881_1057 (size=12595) 2024-12-10T03:33:45,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/edd6b446f60d4219927f6b9a211f83ad is 50, key is test_row_0/B:col10/1733801625457/Put/seqid=0 2024-12-10T03:33:45,505 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5da0795fc09a4e18b68a8e1551938e64 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5da0795fc09a4e18b68a8e1551938e64 2024-12-10T03:33:45,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801685507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,513 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 5da0795fc09a4e18b68a8e1551938e64(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:45,513 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801685506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,513 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801625374; duration=0sec 2024-12-10T03:33:45,513 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,513 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:45,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801685512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801685514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741882_1058 (size=12151) 2024-12-10T03:33:45,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/edd6b446f60d4219927f6b9a211f83ad 2024-12-10T03:33:45,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/d3e53045bf354771beb305327db6e216 is 50, key is test_row_0/C:col10/1733801625457/Put/seqid=0 2024-12-10T03:33:45,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741883_1059 (size=12151) 2024-12-10T03:33:45,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/d3e53045bf354771beb305327db6e216 2024-12-10T03:33:45,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/327f95038a4b4f30806aaba66afb0827 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/327f95038a4b4f30806aaba66afb0827 2024-12-10T03:33:45,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/327f95038a4b4f30806aaba66afb0827, entries=150, sequenceid=219, filesize=11.9 K 2024-12-10T03:33:45,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/edd6b446f60d4219927f6b9a211f83ad as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/edd6b446f60d4219927f6b9a211f83ad 2024-12-10T03:33:45,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/edd6b446f60d4219927f6b9a211f83ad, entries=150, sequenceid=219, filesize=11.9 K 2024-12-10T03:33:45,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/d3e53045bf354771beb305327db6e216 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d3e53045bf354771beb305327db6e216 2024-12-10T03:33:45,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d3e53045bf354771beb305327db6e216, entries=150, sequenceid=219, filesize=11.9 K 2024-12-10T03:33:45,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801685615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for c1d70420fe32ca56286cb5d739cf351d in 160ms, sequenceid=219, compaction requested=false 2024-12-10T03:33:45,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:45,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:33:45,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:45,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:45,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:45,621 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/b6dc20672ac145c88f2b513baf929621 is 50, key is test_row_0/A:col10/1733801625618/Put/seqid=0 2024-12-10T03:33:45,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801685631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801685633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801685634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741884_1060 (size=12151) 2024-12-10T03:33:45,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/b6dc20672ac145c88f2b513baf929621 2024-12-10T03:33:45,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/8ebb0db620c24944b81afc3f447441f2 is 50, key is test_row_0/B:col10/1733801625618/Put/seqid=0 2024-12-10T03:33:45,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741885_1061 (size=12151) 2024-12-10T03:33:45,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/8ebb0db620c24944b81afc3f447441f2 2024-12-10T03:33:45,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/25a397c978aa49d9bb90eb49e37afb43 is 50, key is test_row_0/C:col10/1733801625618/Put/seqid=0 2024-12-10T03:33:45,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741886_1062 (size=12151) 2024-12-10T03:33:45,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=245 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/25a397c978aa49d9bb90eb49e37afb43 2024-12-10T03:33:45,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/b6dc20672ac145c88f2b513baf929621 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6dc20672ac145c88f2b513baf929621 2024-12-10T03:33:45,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6dc20672ac145c88f2b513baf929621, entries=150, sequenceid=245, filesize=11.9 K 2024-12-10T03:33:45,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/8ebb0db620c24944b81afc3f447441f2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/8ebb0db620c24944b81afc3f447441f2 2024-12-10T03:33:45,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/8ebb0db620c24944b81afc3f447441f2, entries=150, sequenceid=245, filesize=11.9 K 2024-12-10T03:33:45,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/25a397c978aa49d9bb90eb49e37afb43 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/25a397c978aa49d9bb90eb49e37afb43 2024-12-10T03:33:45,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801685737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/25a397c978aa49d9bb90eb49e37afb43, entries=150, sequenceid=245, filesize=11.9 K 2024-12-10T03:33:45,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c1d70420fe32ca56286cb5d739cf351d in 122ms, sequenceid=245, compaction requested=true 2024-12-10T03:33:45,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:45,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,743 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:45,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:45,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,743 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:45,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:45,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:45,745 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:45,746 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:45,746 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,746 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bc5a8c45cab74379a6ec5ca8adc30b82, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/edd6b446f60d4219927f6b9a211f83ad, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/8ebb0db620c24944b81afc3f447441f2] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.0 K 2024-12-10T03:33:45,747 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:45,747 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:45,747 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
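The selection size here can be cross-checked from entries earlier in this log: the two B-family flush outputs (edd6b446... and 8ebb0db6...) are 12,151 bytes each per the block reports, so a 36,897-byte selection implies the older bc5a8c45... file is 12,595 bytes, i.e. the 12.3 K the compactor reports below, and the whole selection is the 36.0 K shown for the minor compaction. A small sketch of that arithmetic (file names and byte counts copied from this log, nothing else assumed):

public class CompactionSizeCheck {
    public static void main(String[] args) {
        // Byte counts taken from the block reports and compaction entries in this log.
        long eddFile  = 12_151;   // B/edd6b446f60d4219927f6b9a211f83ad  (blk_..._1058, "filesize=11.9 K")
        long ebbFile  = 12_151;   // B/8ebb0db620c24944b81afc3f447441f2  (blk_..._1061, "filesize=11.9 K")
        long selected = 36_897;   // "selected 3 files of size 36897"
        long bcFile   = selected - eddFile - ebbFile;   // the older B/bc5a8c45... store file
        System.out.printf("bc5a8c45... ~ %d bytes (%.1f K)%n", bcFile, bcFile / 1024.0);   // 12595 bytes, 12.3 K
        System.out.printf("selection total ~ %.1f K%n", selected / 1024.0);                // 36.0 K ("totalSize=36.0 K")
    }
}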
2024-12-10T03:33:45,747 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1cbaa9a6a4f446f687dce25ee8eb1dda, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/327f95038a4b4f30806aaba66afb0827, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6dc20672ac145c88f2b513baf929621] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.0 K 2024-12-10T03:33:45,748 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting bc5a8c45cab74379a6ec5ca8adc30b82, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733801624730 2024-12-10T03:33:45,748 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cbaa9a6a4f446f687dce25ee8eb1dda, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733801624730 2024-12-10T03:33:45,749 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting edd6b446f60d4219927f6b9a211f83ad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733801624840 2024-12-10T03:33:45,749 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 327f95038a4b4f30806aaba66afb0827, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733801624840 2024-12-10T03:33:45,750 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6dc20672ac145c88f2b513baf929621, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733801625510 2024-12-10T03:33:45,750 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ebb0db620c24944b81afc3f447441f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733801625510 2024-12-10T03:33:45,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:33:45,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:45,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:45,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:45,761 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/44df4f200f36407e89a63a856d6d023a is 50, key is test_row_0/A:col10/1733801625744/Put/seqid=0 2024-12-10T03:33:45,769 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:45,770 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/a8fb61179e5f4d1480b91ee4b38c0bc3 is 50, key is test_row_0/B:col10/1733801625618/Put/seqid=0 2024-12-10T03:33:45,773 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#50 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:45,774 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/4fb6eab13a4a4d51b1c842200bf2109c is 50, key is test_row_0/A:col10/1733801625618/Put/seqid=0 2024-12-10T03:33:45,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801685795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,798 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:45,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
as already flushing 2024-12-10T03:33:45,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801685796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:45,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
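The recurring RegionTooBusyException entries ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting puts while the region's memstore is above its blocking size, which is typically the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; this test presumably runs with a small flush size, hence the 512 K limit. Clients see the exception as a retriable IOException and back off until the flushes above drain the memstore. A minimal, hedged client-side sketch follows; the retry policy and the written value are illustrative, and only the table, row, family, and qualifier names are taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);          // the client normally retries internally as well
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) throw e;
                    // Region is over its blocking memstore size; wait for flushes to free memory.
                    Thread.sleep(100L * attempt);
                }
            }
        }
    }
}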
2024-12-10T03:33:45,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741887_1063 (size=12251) 2024-12-10T03:33:45,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741888_1064 (size=12697) 2024-12-10T03:33:45,822 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/a8fb61179e5f4d1480b91ee4b38c0bc3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a8fb61179e5f4d1480b91ee4b38c0bc3 2024-12-10T03:33:45,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801685821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741889_1065 (size=12697) 2024-12-10T03:33:45,831 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/4fb6eab13a4a4d51b1c842200bf2109c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/4fb6eab13a4a4d51b1c842200bf2109c 2024-12-10T03:33:45,834 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into a8fb61179e5f4d1480b91ee4b38c0bc3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:45,834 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,834 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801625743; duration=0sec 2024-12-10T03:33:45,836 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:45,836 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:45,836 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:45,840 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:45,840 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:45,840 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,841 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5da0795fc09a4e18b68a8e1551938e64, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d3e53045bf354771beb305327db6e216, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/25a397c978aa49d9bb90eb49e37afb43] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.0 K 2024-12-10T03:33:45,841 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 4fb6eab13a4a4d51b1c842200bf2109c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
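The "Exploring compaction algorithm has selected 3 files of size 36897" line above is the store picking a contiguous run of eligible HFiles to rewrite as one. A simplified, illustrative selection routine is sketched below; the constants and the tie-breaking rule are assumptions for illustration and do not reproduce HBase's actual ExploringCompactionPolicy.

import java.util.ArrayList;
import java.util.List;

public class CompactionSelectionSketch {
    static final int MIN_FILES = 3;    // assumed minimum files per compaction
    static final int MAX_FILES = 10;   // assumed maximum files per compaction
    static final double RATIO = 1.2;   // assumed size-ratio guard

    // Pick the window of adjacent files with the most members (smallest total
    // size on ties) in which no single file dwarfs the rest of the window.
    static List<Long> select(List<Long> fileSizes) {
        List<Long> best = new ArrayList<>();
        long bestTotal = Long.MAX_VALUE;
        for (int start = 0; start < fileSizes.size(); start++) {
            int limit = Math.min(fileSizes.size(), start + MAX_FILES);
            for (int end = start + MIN_FILES; end <= limit; end++) {
                List<Long> window = fileSizes.subList(start, end);
                if (!withinRatio(window)) {
                    continue;
                }
                long total = window.stream().mapToLong(Long::longValue).sum();
                if (window.size() > best.size()
                        || (window.size() == best.size() && total < bestTotal)) {
                    best = new ArrayList<>(window);
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    static boolean withinRatio(List<Long> window) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * RATIO) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate byte sizes of the three C-family files from the log
        // (about 12.3 K + 11.9 K + 11.9 K, 36897 bytes in total).
        System.out.println(select(List.of(12_563L, 12_167L, 12_167L)));
    }
}

With three roughly equal files, the whole set passes the ratio guard, so all three are rewritten into a single file, which matches the "Completed compaction of 3 (all) file(s)" outcome logged for this store.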
2024-12-10T03:33:45,841 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,841 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801625743; duration=0sec 2024-12-10T03:33:45,841 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,841 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5da0795fc09a4e18b68a8e1551938e64, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1733801624730 2024-12-10T03:33:45,841 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:45,842 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d3e53045bf354771beb305327db6e216, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733801624840 2024-12-10T03:33:45,843 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 25a397c978aa49d9bb90eb49e37afb43, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733801625510 2024-12-10T03:33:45,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801685852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,854 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:45,855 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/05454fc72e9142d9a78d9c04db257b98 is 50, key is test_row_0/C:col10/1733801625618/Put/seqid=0 2024-12-10T03:33:45,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741890_1066 (size=12697) 2024-12-10T03:33:45,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801685900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801685902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,904 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/05454fc72e9142d9a78d9c04db257b98 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05454fc72e9142d9a78d9c04db257b98 2024-12-10T03:33:45,914 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 05454fc72e9142d9a78d9c04db257b98(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:45,914 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:45,914 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801625743; duration=0sec 2024-12-10T03:33:45,914 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:45,914 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:45,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:45,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801685942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,952 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:45,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:45,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:45,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:45,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T03:33:46,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801686103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,106 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801686106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:46,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:46,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
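The repeated "Over memstore limit=512.0 K" RegionTooBusyException responses above are the server pushing writers back until flushes catch up. The stock HBase client already retries this exception internally; the sketch below only makes a bounded exponential backoff explicit. The row, family and qualifier mirror the test's test_row_0/A:col10 keys, and the value bytes are a placeholder.

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),
                Bytes.toBytes("placeholder-value")); // value is a placeholder
            putWithBackoff(table, put, 5, 200);
        }
    }

    // Retry the put with exponential backoff while the region reports it is
    // too busy (memstore over its blocking limit, as in the log above).
    static void putWithBackoff(Table table, Put put, int maxAttempts, long baseDelayMs)
            throws IOException, InterruptedException {
        IOException last = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                table.put(put);
                return;
            } catch (IOException e) {
                if (!(e instanceof RegionTooBusyException)) {
                    throw e; // only back off on the "too busy" push-back
                }
                last = e;
                TimeUnit.MILLISECONDS.sleep(baseDelayMs << attempt);
            }
        }
        throw last;
    }
}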
2024-12-10T03:33:46,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801686124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/44df4f200f36407e89a63a856d6d023a 2024-12-10T03:33:46,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/f4b0050703194dcab940199c071eb782 is 50, key is test_row_0/B:col10/1733801625744/Put/seqid=0 2024-12-10T03:33:46,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741891_1067 (size=12251) 2024-12-10T03:33:46,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/f4b0050703194dcab940199c071eb782 2024-12-10T03:33:46,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801686245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/f0f90c1e65664fcbb6f81b0021f77e1b is 50, key is test_row_0/C:col10/1733801625744/Put/seqid=0 2024-12-10T03:33:46,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:46,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:46,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
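The "Flushed memstore data size=22.36 KB ... to=.../.tmp/A/..." and "Committing .../.tmp/B/<file> as .../B/<file>" lines above show each family's new file being written under the region's .tmp directory and then moved into place once complete. Below is a minimal sketch of that write-then-rename commit using the Hadoop FileSystem API; the paths and payload are placeholders, and a real flush writes an HFile rather than raw bytes.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    // Write the new store file under <region>/.tmp/<family>/ and commit it by
    // renaming into <region>/<family>/, so readers only ever see complete files.
    static Path writeAndCommit(FileSystem fs, Path regionDir, String family,
                               String fileName, byte[] payload) throws IOException {
        Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
        Path finalFile = new Path(new Path(regionDir, family), fileName);
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.write(payload); // stand-in for writing the flushed HFile
        }
        fs.mkdirs(finalFile.getParent());
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
        return finalFile;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        // Example region directory; the log's layout is data/default/<table>/<region>/.
        Path regionDir = new Path("/tmp/example-region");
        writeAndCommit(fs, regionDir, "A", "example-hfile", new byte[]{1, 2, 3});
    }
}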
2024-12-10T03:33:46,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741892_1068 (size=12251) 2024-12-10T03:33:46,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801686408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801686413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,420 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:46,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:46,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:46,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,421 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:46,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,574 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
as already flushing 2024-12-10T03:33:46,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:46,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801686629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/f0f90c1e65664fcbb6f81b0021f77e1b 2024-12-10T03:33:46,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/44df4f200f36407e89a63a856d6d023a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/44df4f200f36407e89a63a856d6d023a 2024-12-10T03:33:46,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/44df4f200f36407e89a63a856d6d023a, entries=150, sequenceid=258, filesize=12.0 K 2024-12-10T03:33:46,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/f4b0050703194dcab940199c071eb782 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f4b0050703194dcab940199c071eb782 2024-12-10T03:33:46,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f4b0050703194dcab940199c071eb782, entries=150, sequenceid=258, filesize=12.0 K 2024-12-10T03:33:46,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/f0f90c1e65664fcbb6f81b0021f77e1b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f0f90c1e65664fcbb6f81b0021f77e1b 2024-12-10T03:33:46,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f0f90c1e65664fcbb6f81b0021f77e1b, entries=150, sequenceid=258, filesize=12.0 K 2024-12-10T03:33:46,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for c1d70420fe32ca56286cb5d739cf351d in 964ms, sequenceid=258, compaction requested=false 2024-12-10T03:33:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:46,727 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,728 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T03:33:46,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,728 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T03:33:46,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:46,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:46,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:46,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:46,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:46,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:46,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a408d820e123434c85da0d1242966669 is 50, key is test_row_0/A:col10/1733801625786/Put/seqid=0 2024-12-10T03:33:46,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741893_1069 (size=12301) 2024-12-10T03:33:46,750 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a408d820e123434c85da0d1242966669 2024-12-10T03:33:46,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:46,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:46,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/f620d060b0f745c5888faaf1e1361542 is 50, key is test_row_0/B:col10/1733801625786/Put/seqid=0 2024-12-10T03:33:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741894_1070 (size=12301) 2024-12-10T03:33:46,769 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/f620d060b0f745c5888faaf1e1361542 2024-12-10T03:33:46,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/402bb7d762e14ce99ee106d0f12c1f52 is 50, key is test_row_0/C:col10/1733801625786/Put/seqid=0 2024-12-10T03:33:46,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741895_1071 (size=12301) 2024-12-10T03:33:46,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801686796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,800 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/402bb7d762e14ce99ee106d0f12c1f52 2024-12-10T03:33:46,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a408d820e123434c85da0d1242966669 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a408d820e123434c85da0d1242966669 2024-12-10T03:33:46,817 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a408d820e123434c85da0d1242966669, entries=150, sequenceid=284, filesize=12.0 K 2024-12-10T03:33:46,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/f620d060b0f745c5888faaf1e1361542 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f620d060b0f745c5888faaf1e1361542 2024-12-10T03:33:46,827 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f620d060b0f745c5888faaf1e1361542, entries=150, sequenceid=284, filesize=12.0 K 2024-12-10T03:33:46,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/402bb7d762e14ce99ee106d0f12c1f52 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/402bb7d762e14ce99ee106d0f12c1f52 2024-12-10T03:33:46,836 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/402bb7d762e14ce99ee106d0f12c1f52, entries=150, sequenceid=284, filesize=12.0 K 2024-12-10T03:33:46,838 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c1d70420fe32ca56286cb5d739cf351d in 110ms, sequenceid=284, compaction requested=true 2024-12-10T03:33:46,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:46,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:46,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-10T03:33:46,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-10T03:33:46,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-10T03:33:46,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9120 sec 2024-12-10T03:33:46,847 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.9210 sec 2024-12-10T03:33:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:46,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:33:46,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:46,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:46,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:46,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:46,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:46,862 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:46,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/d21d6e036e82432eaf82f346dcbd0726 is 50, key is test_row_0/A:col10/1733801626860/Put/seqid=0 2024-12-10T03:33:46,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741896_1072 (size=14741) 2024-12-10T03:33:46,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/d21d6e036e82432eaf82f346dcbd0726 2024-12-10T03:33:46,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1cd152e352c549ec8ef3cd45a7096146 is 50, key is test_row_0/B:col10/1733801626860/Put/seqid=0 2024-12-10T03:33:46,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741897_1073 (size=12301) 2024-12-10T03:33:46,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1cd152e352c549ec8ef3cd45a7096146 2024-12-10T03:33:46,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/087f98bd903342d0b0ac9dcabd9c5140 is 50, key is test_row_0/C:col10/1733801626860/Put/seqid=0 2024-12-10T03:33:46,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801686930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801686933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801686937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:46,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801686937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:46,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741898_1074 (size=12301) 2024-12-10T03:33:46,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/087f98bd903342d0b0ac9dcabd9c5140 2024-12-10T03:33:46,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/d21d6e036e82432eaf82f346dcbd0726 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d21d6e036e82432eaf82f346dcbd0726 2024-12-10T03:33:46,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d21d6e036e82432eaf82f346dcbd0726, entries=200, sequenceid=298, filesize=14.4 K 2024-12-10T03:33:46,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1cd152e352c549ec8ef3cd45a7096146 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1cd152e352c549ec8ef3cd45a7096146 2024-12-10T03:33:46,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1cd152e352c549ec8ef3cd45a7096146, entries=150, sequenceid=298, filesize=12.0 K 2024-12-10T03:33:46,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/087f98bd903342d0b0ac9dcabd9c5140 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/087f98bd903342d0b0ac9dcabd9c5140 2024-12-10T03:33:46,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/087f98bd903342d0b0ac9dcabd9c5140, entries=150, sequenceid=298, filesize=12.0 K 2024-12-10T03:33:46,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for c1d70420fe32ca56286cb5d739cf351d in 138ms, sequenceid=298, compaction requested=true 2024-12-10T03:33:46,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:47,000 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:33:47,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:47,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:47,001 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:33:47,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:47,001 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51990 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:33:47,001 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:47,002 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:47,002 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/4fb6eab13a4a4d51b1c842200bf2109c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/44df4f200f36407e89a63a856d6d023a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a408d820e123434c85da0d1242966669, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d21d6e036e82432eaf82f346dcbd0726] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=50.8 K 2024-12-10T03:33:47,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:47,002 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fb6eab13a4a4d51b1c842200bf2109c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733801625510 2024-12-10T03:33:47,003 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49550 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:33:47,003 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:47,003 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
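Note on the "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" and ExploringCompactionPolicy lines above: minor compactions of stores A and B are being selected because four flushed HFiles have accumulated per store. The thresholds behind that decision are ordinary HBase store configuration; the sketch below shows the relevant knobs with their usual defaults (the blocking value of 16 matches the SortedCompactionPolicy line above). It is illustrative only and not taken from this test's actual site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Number of eligible store files that triggers selection of a minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files are folded into one minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Writes to a store with this many files are blocked until compaction catches up
        // (the "16 blocking" figure reported by SortedCompactionPolicy above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.get("hbase.hstore.compaction.min"));
    }
}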
2024-12-10T03:33:47,003 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a8fb61179e5f4d1480b91ee4b38c0bc3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f4b0050703194dcab940199c071eb782, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f620d060b0f745c5888faaf1e1361542, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1cd152e352c549ec8ef3cd45a7096146] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=48.4 K 2024-12-10T03:33:47,003 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44df4f200f36407e89a63a856d6d023a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733801625631 2024-12-10T03:33:47,003 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a8fb61179e5f4d1480b91ee4b38c0bc3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733801625510 2024-12-10T03:33:47,004 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a408d820e123434c85da0d1242966669, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733801625786 2024-12-10T03:33:47,004 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f4b0050703194dcab940199c071eb782, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733801625631 2024-12-10T03:33:47,004 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting d21d6e036e82432eaf82f346dcbd0726, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733801626778 2024-12-10T03:33:47,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:47,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:47,005 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f620d060b0f745c5888faaf1e1361542, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733801625786 2024-12-10T03:33:47,005 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cd152e352c549ec8ef3cd45a7096146, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733801626778 2024-12-10T03:33:47,022 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#60 average throughput is 6.55 MB/second, slept 0 
time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:47,023 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d7a0d1aeb0c647049d4b4ef80eb0b8df is 50, key is test_row_0/B:col10/1733801626860/Put/seqid=0 2024-12-10T03:33:47,023 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#61 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:47,023 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/b5a13ddc259a4571ae238cfeabe11a40 is 50, key is test_row_0/A:col10/1733801626860/Put/seqid=0 2024-12-10T03:33:47,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741899_1075 (size=12983) 2024-12-10T03:33:47,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741900_1076 (size=12983) 2024-12-10T03:33:47,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:47,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T03:33:47,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:47,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:47,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:47,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:47,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:47,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:47,045 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/b5a13ddc259a4571ae238cfeabe11a40 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b5a13ddc259a4571ae238cfeabe11a40 2024-12-10T03:33:47,046 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d7a0d1aeb0c647049d4b4ef80eb0b8df as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d7a0d1aeb0c647049d4b4ef80eb0b8df 2024-12-10T03:33:47,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/c64a74b25ce043908d88fc60838c8d9b is 50, key is test_row_0/A:col10/1733801626926/Put/seqid=0 2024-12-10T03:33:47,059 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into b5a13ddc259a4571ae238cfeabe11a40(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:47,059 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into d7a0d1aeb0c647049d4b4ef80eb0b8df(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:47,059 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:47,059 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:47,059 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=12, startTime=1733801627001; duration=0sec 2024-12-10T03:33:47,059 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=12, startTime=1733801627000; duration=0sec 2024-12-10T03:33:47,060 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:47,060 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:47,060 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:47,060 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:47,060 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:33:47,062 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49550 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:33:47,062 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:47,063 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:47,063 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05454fc72e9142d9a78d9c04db257b98, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f0f90c1e65664fcbb6f81b0021f77e1b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/402bb7d762e14ce99ee106d0f12c1f52, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/087f98bd903342d0b0ac9dcabd9c5140] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=48.4 K 2024-12-10T03:33:47,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801687057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,064 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 05454fc72e9142d9a78d9c04db257b98, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733801625510 2024-12-10T03:33:47,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801687057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801687058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,065 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f0f90c1e65664fcbb6f81b0021f77e1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733801625631 2024-12-10T03:33:47,066 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 402bb7d762e14ce99ee106d0f12c1f52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1733801625786 2024-12-10T03:33:47,066 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 087f98bd903342d0b0ac9dcabd9c5140, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733801626778 2024-12-10T03:33:47,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801687062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,089 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#63 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:47,091 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/d9d30c92bc364b668108f1657a39d0b5 is 50, key is test_row_0/C:col10/1733801626860/Put/seqid=0 2024-12-10T03:33:47,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741901_1077 (size=14741) 2024-12-10T03:33:47,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/c64a74b25ce043908d88fc60838c8d9b 2024-12-10T03:33:47,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741902_1078 (size=12983) 2024-12-10T03:33:47,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/a282ff605ed04d2c8ab73ed2bd1ceb7e is 50, key is test_row_0/B:col10/1733801626926/Put/seqid=0 2024-12-10T03:33:47,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741903_1079 (size=12301) 2024-12-10T03:33:47,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/a282ff605ed04d2c8ab73ed2bd1ceb7e 2024-12-10T03:33:47,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/6a95dc5af51645ff988b92303c4fda31 
is 50, key is test_row_0/C:col10/1733801626926/Put/seqid=0 2024-12-10T03:33:47,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741904_1080 (size=12301) 2024-12-10T03:33:47,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/6a95dc5af51645ff988b92303c4fda31 2024-12-10T03:33:47,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/c64a74b25ce043908d88fc60838c8d9b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/c64a74b25ce043908d88fc60838c8d9b 2024-12-10T03:33:47,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801687166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801687166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801687167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/c64a74b25ce043908d88fc60838c8d9b, entries=200, sequenceid=322, filesize=14.4 K 2024-12-10T03:33:47,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801687168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/a282ff605ed04d2c8ab73ed2bd1ceb7e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a282ff605ed04d2c8ab73ed2bd1ceb7e 2024-12-10T03:33:47,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a282ff605ed04d2c8ab73ed2bd1ceb7e, entries=150, sequenceid=322, filesize=12.0 K 2024-12-10T03:33:47,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/6a95dc5af51645ff988b92303c4fda31 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/6a95dc5af51645ff988b92303c4fda31 2024-12-10T03:33:47,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/6a95dc5af51645ff988b92303c4fda31, entries=150, sequenceid=322, filesize=12.0 K 2024-12-10T03:33:47,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c1d70420fe32ca56286cb5d739cf351d in 151ms, sequenceid=322, compaction requested=false 2024-12-10T03:33:47,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:47,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:47,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:33:47,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:47,372 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:47,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:47,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:47,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:47,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:47,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/92fd359d7c7646d7933106fa7d31d243 is 50, key is test_row_0/A:col10/1733801627370/Put/seqid=0 2024-12-10T03:33:47,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741905_1081 (size=12301) 2024-12-10T03:33:47,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801687426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801687431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801687431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801687432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,519 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/d9d30c92bc364b668108f1657a39d0b5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d9d30c92bc364b668108f1657a39d0b5 2024-12-10T03:33:47,527 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into d9d30c92bc364b668108f1657a39d0b5(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:47,527 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:47,527 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=12, startTime=1733801627002; duration=0sec 2024-12-10T03:33:47,527 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:47,527 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:47,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801687532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801687536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801687537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801687537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801687636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801687735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801687739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801687740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:47,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801687741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:47,794 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/92fd359d7c7646d7933106fa7d31d243 2024-12-10T03:33:47,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c5ab85cfefe44e75a58f6282da18c665 is 50, key is test_row_0/B:col10/1733801627370/Put/seqid=0 2024-12-10T03:33:47,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741906_1082 (size=12301) 2024-12-10T03:33:47,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c5ab85cfefe44e75a58f6282da18c665 2024-12-10T03:33:47,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7347a6a0954f4e22a706cf68a1b27cd8 is 50, key is test_row_0/C:col10/1733801627370/Put/seqid=0 2024-12-10T03:33:47,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741907_1083 (size=12301) 2024-12-10T03:33:48,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T03:33:48,038 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-10T03:33:48,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 
{}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-10T03:33:48,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801688039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T03:33:48,043 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:48,044 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:48,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:48,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801688043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801688045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801688051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T03:33:48,196 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T03:33:48,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:48,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:48,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:48,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:48,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:48,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:48,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7347a6a0954f4e22a706cf68a1b27cd8 2024-12-10T03:33:48,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/92fd359d7c7646d7933106fa7d31d243 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/92fd359d7c7646d7933106fa7d31d243 2024-12-10T03:33:48,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/92fd359d7c7646d7933106fa7d31d243, entries=150, sequenceid=337, filesize=12.0 K 2024-12-10T03:33:48,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c5ab85cfefe44e75a58f6282da18c665 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c5ab85cfefe44e75a58f6282da18c665 2024-12-10T03:33:48,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c5ab85cfefe44e75a58f6282da18c665, entries=150, sequenceid=337, filesize=12.0 K 2024-12-10T03:33:48,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7347a6a0954f4e22a706cf68a1b27cd8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7347a6a0954f4e22a706cf68a1b27cd8 2024-12-10T03:33:48,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7347a6a0954f4e22a706cf68a1b27cd8, entries=150, sequenceid=337, filesize=12.0 K 2024-12-10T03:33:48,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for c1d70420fe32ca56286cb5d739cf351d in 882ms, sequenceid=337, compaction requested=true 2024-12-10T03:33:48,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:48,255 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:48,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:48,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:48,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:48,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:48,255 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:48,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:48,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:48,256 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:48,256 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:48,257 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:48,257 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:48,257 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b5a13ddc259a4571ae238cfeabe11a40, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/c64a74b25ce043908d88fc60838c8d9b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/92fd359d7c7646d7933106fa7d31d243] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=39.1 K 2024-12-10T03:33:48,257 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:48,257 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:48,257 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d7a0d1aeb0c647049d4b4ef80eb0b8df, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a282ff605ed04d2c8ab73ed2bd1ceb7e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c5ab85cfefe44e75a58f6282da18c665] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.7 K 2024-12-10T03:33:48,258 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5a13ddc259a4571ae238cfeabe11a40, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733801626778 2024-12-10T03:33:48,258 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d7a0d1aeb0c647049d4b4ef80eb0b8df, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733801626778 2024-12-10T03:33:48,258 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting c64a74b25ce043908d88fc60838c8d9b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1733801626920 2024-12-10T03:33:48,258 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a282ff605ed04d2c8ab73ed2bd1ceb7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1733801626920 2024-12-10T03:33:48,259 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 92fd359d7c7646d7933106fa7d31d243, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733801627053 2024-12-10T03:33:48,259 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c5ab85cfefe44e75a58f6282da18c665, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733801627053 2024-12-10T03:33:48,271 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:48,272 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c31bc6b1d4e346448eb635ca8f2c758a is 50, key is test_row_0/B:col10/1733801627370/Put/seqid=0 2024-12-10T03:33:48,281 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:48,282 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/b6d876bdc36849ecb460972db335f2ca is 50, key is test_row_0/A:col10/1733801627370/Put/seqid=0 2024-12-10T03:33:48,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741908_1084 (size=13085) 2024-12-10T03:33:48,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741909_1085 (size=13085) 2024-12-10T03:33:48,314 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/b6d876bdc36849ecb460972db335f2ca as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6d876bdc36849ecb460972db335f2ca 2024-12-10T03:33:48,322 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into b6d876bdc36849ecb460972db335f2ca(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:48,322 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:48,322 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801628255; duration=0sec 2024-12-10T03:33:48,323 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:48,323 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:48,323 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:48,324 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:48,324 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:48,324 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:48,325 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d9d30c92bc364b668108f1657a39d0b5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/6a95dc5af51645ff988b92303c4fda31, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7347a6a0954f4e22a706cf68a1b27cd8] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.7 K 2024-12-10T03:33:48,325 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9d30c92bc364b668108f1657a39d0b5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733801626778 2024-12-10T03:33:48,326 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a95dc5af51645ff988b92303c4fda31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1733801626920 2024-12-10T03:33:48,326 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7347a6a0954f4e22a706cf68a1b27cd8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733801627053 2024-12-10T03:33:48,334 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#71 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:48,334 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/05bbe9e2af2c4c09ab904c20def86b09 is 50, key is test_row_0/C:col10/1733801627370/Put/seqid=0 2024-12-10T03:33:48,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T03:33:48,350 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T03:33:48,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:48,351 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:33:48,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:48,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:48,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:48,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:48,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:48,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:48,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/36ac2bf15bff4177a70b1654c43e2446 is 50, key is test_row_0/A:col10/1733801627428/Put/seqid=0 2024-12-10T03:33:48,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741910_1086 (size=13085) 
2024-12-10T03:33:48,367 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/05bbe9e2af2c4c09ab904c20def86b09 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05bbe9e2af2c4c09ab904c20def86b09 2024-12-10T03:33:48,375 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 05bbe9e2af2c4c09ab904c20def86b09(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:48,375 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:48,375 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801628255; duration=0sec 2024-12-10T03:33:48,375 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:48,375 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:48,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741911_1087 (size=12301) 2024-12-10T03:33:48,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:48,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:48,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801688558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801688558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801688559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801688559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T03:33:48,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801688663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801688663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801688663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801688664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,708 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/c31bc6b1d4e346448eb635ca8f2c758a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c31bc6b1d4e346448eb635ca8f2c758a 2024-12-10T03:33:48,718 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into c31bc6b1d4e346448eb635ca8f2c758a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:48,718 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:48,719 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801628255; duration=0sec 2024-12-10T03:33:48,719 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:48,719 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:48,778 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/36ac2bf15bff4177a70b1654c43e2446 2024-12-10T03:33:48,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/722d9e8dc9c04b7fb0b47b0797f42dce is 50, key is test_row_0/B:col10/1733801627428/Put/seqid=0 2024-12-10T03:33:48,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741912_1088 (size=12301) 2024-12-10T03:33:48,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801688865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801688866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801688867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:48,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801688867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T03:33:49,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801689168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801689170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801689171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801689172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,192 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/722d9e8dc9c04b7fb0b47b0797f42dce 2024-12-10T03:33:49,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2b506dedf7274215bc17a30a12fa91a8 is 50, key is test_row_0/C:col10/1733801627428/Put/seqid=0 2024-12-10T03:33:49,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741913_1089 (size=12301) 2024-12-10T03:33:49,227 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2b506dedf7274215bc17a30a12fa91a8 2024-12-10T03:33:49,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/36ac2bf15bff4177a70b1654c43e2446 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/36ac2bf15bff4177a70b1654c43e2446 2024-12-10T03:33:49,244 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/36ac2bf15bff4177a70b1654c43e2446, entries=150, sequenceid=364, filesize=12.0 K 2024-12-10T03:33:49,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/722d9e8dc9c04b7fb0b47b0797f42dce as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/722d9e8dc9c04b7fb0b47b0797f42dce 2024-12-10T03:33:49,256 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/722d9e8dc9c04b7fb0b47b0797f42dce, entries=150, sequenceid=364, filesize=12.0 K 2024-12-10T03:33:49,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/2b506dedf7274215bc17a30a12fa91a8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2b506dedf7274215bc17a30a12fa91a8 2024-12-10T03:33:49,264 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2b506dedf7274215bc17a30a12fa91a8, entries=150, sequenceid=364, filesize=12.0 K 2024-12-10T03:33:49,266 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for c1d70420fe32ca56286cb5d739cf351d in 915ms, sequenceid=364, compaction requested=false 2024-12-10T03:33:49,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:49,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:49,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-10T03:33:49,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-10T03:33:49,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-10T03:33:49,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2240 sec 2024-12-10T03:33:49,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.2310 sec 2024-12-10T03:33:49,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:49,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:33:49,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:49,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:49,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:49,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:49,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:49,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:49,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/d8574480e8b444fa97ec0194d55df7f3 is 50, key is test_row_0/A:col10/1733801628558/Put/seqid=0 2024-12-10T03:33:49,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741914_1090 (size=12301) 2024-12-10T03:33:49,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/d8574480e8b444fa97ec0194d55df7f3 2024-12-10T03:33:49,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/775c7baad6d84011a4e6fdc52228d613 is 50, key is test_row_0/B:col10/1733801628558/Put/seqid=0 2024-12-10T03:33:49,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741915_1091 
(size=12301) 2024-12-10T03:33:49,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/775c7baad6d84011a4e6fdc52228d613 2024-12-10T03:33:49,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801689693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801689696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801689697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801689696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801689701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/300427e56dc54ca6b18022b8a33226f5 is 50, key is test_row_0/C:col10/1733801628558/Put/seqid=0 2024-12-10T03:33:49,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741916_1092 (size=12301) 2024-12-10T03:33:49,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/300427e56dc54ca6b18022b8a33226f5 2024-12-10T03:33:49,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/d8574480e8b444fa97ec0194d55df7f3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d8574480e8b444fa97ec0194d55df7f3 2024-12-10T03:33:49,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d8574480e8b444fa97ec0194d55df7f3, entries=150, sequenceid=379, filesize=12.0 K 2024-12-10T03:33:49,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/775c7baad6d84011a4e6fdc52228d613 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/775c7baad6d84011a4e6fdc52228d613 2024-12-10T03:33:49,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/775c7baad6d84011a4e6fdc52228d613, entries=150, sequenceid=379, filesize=12.0 K 2024-12-10T03:33:49,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/300427e56dc54ca6b18022b8a33226f5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/300427e56dc54ca6b18022b8a33226f5 2024-12-10T03:33:49,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/300427e56dc54ca6b18022b8a33226f5, entries=150, sequenceid=379, filesize=12.0 K 2024-12-10T03:33:49,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for c1d70420fe32ca56286cb5d739cf351d in 118ms, sequenceid=379, compaction requested=true 2024-12-10T03:33:49,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:49,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:49,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:49,766 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:49,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:49,766 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:49,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:49,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:49,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:49,768 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:49,768 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:49,769 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:49,769 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c31bc6b1d4e346448eb635ca8f2c758a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/722d9e8dc9c04b7fb0b47b0797f42dce, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/775c7baad6d84011a4e6fdc52228d613] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.8 K 2024-12-10T03:33:49,769 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:49,769 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:49,770 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:49,770 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6d876bdc36849ecb460972db335f2ca, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/36ac2bf15bff4177a70b1654c43e2446, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d8574480e8b444fa97ec0194d55df7f3] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.8 K 2024-12-10T03:33:49,770 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c31bc6b1d4e346448eb635ca8f2c758a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733801627053 2024-12-10T03:33:49,770 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6d876bdc36849ecb460972db335f2ca, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733801627053 2024-12-10T03:33:49,770 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 722d9e8dc9c04b7fb0b47b0797f42dce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733801627422 2024-12-10T03:33:49,771 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36ac2bf15bff4177a70b1654c43e2446, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733801627422 2024-12-10T03:33:49,771 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 775c7baad6d84011a4e6fdc52228d613, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733801628557 2024-12-10T03:33:49,771 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8574480e8b444fa97ec0194d55df7f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733801628557 2024-12-10T03:33:49,784 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:49,784 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1b5d1327e0a94dff91308c91d0a3b906 is 50, key is test_row_0/B:col10/1733801628558/Put/seqid=0 2024-12-10T03:33:49,791 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#79 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:49,792 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/48e263bc315242ad917c579c2f261d5f is 50, key is test_row_0/A:col10/1733801628558/Put/seqid=0 2024-12-10T03:33:49,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741918_1094 (size=13187) 2024-12-10T03:33:49,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741917_1093 (size=13187) 2024-12-10T03:33:49,804 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1b5d1327e0a94dff91308c91d0a3b906 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1b5d1327e0a94dff91308c91d0a3b906 2024-12-10T03:33:49,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:49,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T03:33:49,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:49,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:49,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:49,807 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:49,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:49,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:49,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/f9045a348e104e148d4cb250c5afab48 is 50, key is test_row_0/A:col10/1733801629804/Put/seqid=0 2024-12-10T03:33:49,816 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into 1b5d1327e0a94dff91308c91d0a3b906(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:49,816 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:49,816 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801629766; duration=0sec 2024-12-10T03:33:49,816 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:49,816 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:49,816 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:49,818 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:49,818 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:49,818 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:49,818 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05bbe9e2af2c4c09ab904c20def86b09, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2b506dedf7274215bc17a30a12fa91a8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/300427e56dc54ca6b18022b8a33226f5] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.8 K 2024-12-10T03:33:49,819 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 05bbe9e2af2c4c09ab904c20def86b09, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1733801627053 2024-12-10T03:33:49,819 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b506dedf7274215bc17a30a12fa91a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733801627422 2024-12-10T03:33:49,820 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 300427e56dc54ca6b18022b8a33226f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733801628557 2024-12-10T03:33:49,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801689817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801689817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801689822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801689823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801689824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741919_1095 (size=12301) 2024-12-10T03:33:49,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/f9045a348e104e148d4cb250c5afab48 2024-12-10T03:33:49,831 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:49,832 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/1ee4f10d20c74e3b823d6d79bce0e6fc is 50, key is test_row_0/C:col10/1733801628558/Put/seqid=0 2024-12-10T03:33:49,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741920_1096 (size=13187) 2024-12-10T03:33:49,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/056b7fc6f0314ea2bab85e4f97bd6143 is 50, key is test_row_0/B:col10/1733801629804/Put/seqid=0 2024-12-10T03:33:49,859 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/1ee4f10d20c74e3b823d6d79bce0e6fc as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/1ee4f10d20c74e3b823d6d79bce0e6fc 2024-12-10T03:33:49,867 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 1ee4f10d20c74e3b823d6d79bce0e6fc(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:49,868 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:49,868 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801629766; duration=0sec 2024-12-10T03:33:49,868 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:49,868 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:49,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741921_1097 (size=12301) 2024-12-10T03:33:49,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/056b7fc6f0314ea2bab85e4f97bd6143 2024-12-10T03:33:49,904 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a3c3cb181975485da79cc8576284e82b is 50, key is test_row_0/C:col10/1733801629804/Put/seqid=0 2024-12-10T03:33:49,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741922_1098 (size=12301) 2024-12-10T03:33:49,918 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=403 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a3c3cb181975485da79cc8576284e82b 2024-12-10T03:33:49,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801689926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801689926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/f9045a348e104e148d4cb250c5afab48 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f9045a348e104e148d4cb250c5afab48 2024-12-10T03:33:49,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801689926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801689928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801689928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:49,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f9045a348e104e148d4cb250c5afab48, entries=150, sequenceid=403, filesize=12.0 K 2024-12-10T03:33:49,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/056b7fc6f0314ea2bab85e4f97bd6143 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/056b7fc6f0314ea2bab85e4f97bd6143 2024-12-10T03:33:49,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/056b7fc6f0314ea2bab85e4f97bd6143, entries=150, sequenceid=403, filesize=12.0 K 2024-12-10T03:33:49,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a3c3cb181975485da79cc8576284e82b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a3c3cb181975485da79cc8576284e82b 2024-12-10T03:33:49,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a3c3cb181975485da79cc8576284e82b, entries=150, sequenceid=403, filesize=12.0 K 2024-12-10T03:33:49,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c1d70420fe32ca56286cb5d739cf351d in 157ms, sequenceid=403, compaction requested=false 2024-12-10T03:33:49,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:50,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:50,130 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:33:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:50,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:50,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T03:33:50,147 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-10T03:33:50,149 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:50,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801690147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801690150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801690152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801690152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801690153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-10T03:33:50,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T03:33:50,165 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:50,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/af870d9ea93f4110bc5816c3ca7846ca is 50, key is test_row_0/A:col10/1733801629821/Put/seqid=0 2024-12-10T03:33:50,165 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:50,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:50,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741923_1099 (size=12301) 2024-12-10T03:33:50,204 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/48e263bc315242ad917c579c2f261d5f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/48e263bc315242ad917c579c2f261d5f 2024-12-10T03:33:50,215 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 48e263bc315242ad917c579c2f261d5f(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:50,215 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:50,215 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801629766; duration=0sec 2024-12-10T03:33:50,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:50,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:50,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801690254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801690257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801690258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801690259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801690259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T03:33:50,318 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:50,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:50,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801690455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801690458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801690461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801690462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801690463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T03:33:50,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:50,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:50,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/af870d9ea93f4110bc5816c3ca7846ca 2024-12-10T03:33:50,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/34ebc19078b146e68f76f35dece0498f is 50, key is test_row_0/B:col10/1733801629821/Put/seqid=0 2024-12-10T03:33:50,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741924_1100 (size=12301) 2024-12-10T03:33:50,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:50,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
as already flushing 2024-12-10T03:33:50,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801690759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801690761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801690763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801690765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T03:33:50,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801690765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,779 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:50,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:50,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:50,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,932 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:50,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:50,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:50,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:50,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:50,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:50,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/34ebc19078b146e68f76f35dece0498f 2024-12-10T03:33:50,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/c3d08f652dd64e749fcfc27949f49d29 is 50, key is test_row_0/C:col10/1733801629821/Put/seqid=0 2024-12-10T03:33:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741925_1101 (size=12301) 2024-12-10T03:33:51,085 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:51,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:51,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:51,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:51,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:51,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:51,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:51,237 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:51,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:51,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:51,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:51,238 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:51,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:51,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:51,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801691263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T03:33:51,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:51,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801691268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:51,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801691268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:51,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801691269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:51,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801691271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,390 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:51,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:51,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:51,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:51,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:51,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:51,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:51,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=418 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/c3d08f652dd64e749fcfc27949f49d29 2024-12-10T03:33:51,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/af870d9ea93f4110bc5816c3ca7846ca as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/af870d9ea93f4110bc5816c3ca7846ca 2024-12-10T03:33:51,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/af870d9ea93f4110bc5816c3ca7846ca, entries=150, sequenceid=418, filesize=12.0 K 2024-12-10T03:33:51,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/34ebc19078b146e68f76f35dece0498f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/34ebc19078b146e68f76f35dece0498f 2024-12-10T03:33:51,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/34ebc19078b146e68f76f35dece0498f, entries=150, sequenceid=418, filesize=12.0 K 2024-12-10T03:33:51,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/c3d08f652dd64e749fcfc27949f49d29 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c3d08f652dd64e749fcfc27949f49d29 2024-12-10T03:33:51,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c3d08f652dd64e749fcfc27949f49d29, entries=150, sequenceid=418, filesize=12.0 K 2024-12-10T03:33:51,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for c1d70420fe32ca56286cb5d739cf351d in 1290ms, sequenceid=418, compaction requested=true 2024-12-10T03:33:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:51,420 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:51,420 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:51,421 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:51,421 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:51,421 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:51,421 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:51,421 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:51,421 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:51,421 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1b5d1327e0a94dff91308c91d0a3b906, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/056b7fc6f0314ea2bab85e4f97bd6143, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/34ebc19078b146e68f76f35dece0498f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.9 K 2024-12-10T03:33:51,421 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/48e263bc315242ad917c579c2f261d5f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f9045a348e104e148d4cb250c5afab48, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/af870d9ea93f4110bc5816c3ca7846ca] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.9 K 2024-12-10T03:33:51,422 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b5d1327e0a94dff91308c91d0a3b906, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733801628557 2024-12-10T03:33:51,422 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48e263bc315242ad917c579c2f261d5f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733801628557 2024-12-10T03:33:51,422 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 056b7fc6f0314ea2bab85e4f97bd6143, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1733801629687 2024-12-10T03:33:51,422 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9045a348e104e148d4cb250c5afab48, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1733801629687 2024-12-10T03:33:51,422 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 34ebc19078b146e68f76f35dece0498f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733801629821 2024-12-10T03:33:51,422 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting af870d9ea93f4110bc5816c3ca7846ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733801629821 2024-12-10T03:33:51,431 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:51,431 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:51,431 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/1ede28bf76e04a12a776d5ce2c53f3e7 is 50, key is test_row_0/A:col10/1733801629821/Put/seqid=0 2024-12-10T03:33:51,431 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/3ad6ac936e8945c09218589c91c3606e is 50, key is test_row_0/B:col10/1733801629821/Put/seqid=0 2024-12-10T03:33:51,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741927_1103 (size=13289) 2024-12-10T03:33:51,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741926_1102 (size=13289) 2024-12-10T03:33:51,457 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/3ad6ac936e8945c09218589c91c3606e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/3ad6ac936e8945c09218589c91c3606e 2024-12-10T03:33:51,463 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into 3ad6ac936e8945c09218589c91c3606e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:51,463 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:51,463 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801631420; duration=0sec 2024-12-10T03:33:51,463 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:51,464 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:51,464 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:51,465 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:51,465 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:51,465 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:51,465 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/1ee4f10d20c74e3b823d6d79bce0e6fc, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a3c3cb181975485da79cc8576284e82b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c3d08f652dd64e749fcfc27949f49d29] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=36.9 K 2024-12-10T03:33:51,466 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ee4f10d20c74e3b823d6d79bce0e6fc, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1733801628557 2024-12-10T03:33:51,466 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a3c3cb181975485da79cc8576284e82b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=403, earliestPutTs=1733801629687 2024-12-10T03:33:51,467 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c3d08f652dd64e749fcfc27949f49d29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733801629821 2024-12-10T03:33:51,474 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c1d70420fe32ca56286cb5d739cf351d#C#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:51,475 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/d7b9f5ed3bbd497599087305d589ffc9 is 50, key is test_row_0/C:col10/1733801629821/Put/seqid=0 2024-12-10T03:33:51,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741928_1104 (size=13289) 2024-12-10T03:33:51,490 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/d7b9f5ed3bbd497599087305d589ffc9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d7b9f5ed3bbd497599087305d589ffc9 2024-12-10T03:33:51,498 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into d7b9f5ed3bbd497599087305d589ffc9(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:51,498 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:51,498 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801631420; duration=0sec 2024-12-10T03:33:51,498 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:51,498 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:51,543 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:51,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T03:33:51,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:51,544 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:33:51,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:51,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:51,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:51,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:51,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:51,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:51,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/37314e2237f444f8a17fe0369c75c255 is 50, key is test_row_0/A:col10/1733801630149/Put/seqid=0 2024-12-10T03:33:51,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741929_1105 (size=12301) 2024-12-10T03:33:51,557 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/37314e2237f444f8a17fe0369c75c255 2024-12-10T03:33:51,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/5c4bb1635b9f403887973cf2eff6c180 is 50, key is test_row_0/B:col10/1733801630149/Put/seqid=0 2024-12-10T03:33:51,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741930_1106 (size=12301) 2024-12-10T03:33:51,856 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/1ede28bf76e04a12a776d5ce2c53f3e7 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1ede28bf76e04a12a776d5ce2c53f3e7 2024-12-10T03:33:51,862 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 1ede28bf76e04a12a776d5ce2c53f3e7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:51,862 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:51,862 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801631420; duration=0sec 2024-12-10T03:33:51,862 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:51,862 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:51,980 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/5c4bb1635b9f403887973cf2eff6c180 2024-12-10T03:33:51,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/b19713258592463c95c9f6cc184cc890 is 50, key is test_row_0/C:col10/1733801630149/Put/seqid=0 2024-12-10T03:33:51,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741931_1107 (size=12301) 2024-12-10T03:33:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T03:33:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:52,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:52,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801692282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801692283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801692283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801692284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801692284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801692385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801692388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801692388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801692388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801692388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,393 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/b19713258592463c95c9f6cc184cc890 2024-12-10T03:33:52,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/37314e2237f444f8a17fe0369c75c255 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/37314e2237f444f8a17fe0369c75c255 2024-12-10T03:33:52,406 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/37314e2237f444f8a17fe0369c75c255, entries=150, sequenceid=446, filesize=12.0 K 2024-12-10T03:33:52,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/5c4bb1635b9f403887973cf2eff6c180 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5c4bb1635b9f403887973cf2eff6c180 2024-12-10T03:33:52,416 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5c4bb1635b9f403887973cf2eff6c180, entries=150, sequenceid=446, filesize=12.0 K 2024-12-10T03:33:52,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/b19713258592463c95c9f6cc184cc890 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/b19713258592463c95c9f6cc184cc890 2024-12-10T03:33:52,422 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/b19713258592463c95c9f6cc184cc890, entries=150, sequenceid=446, filesize=12.0 K 2024-12-10T03:33:52,424 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for c1d70420fe32ca56286cb5d739cf351d in 879ms, sequenceid=446, compaction requested=false 2024-12-10T03:33:52,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:52,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:52,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-10T03:33:52,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-10T03:33:52,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-10T03:33:52,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2590 sec 2024-12-10T03:33:52,428 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.2780 sec 2024-12-10T03:33:52,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:52,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:33:52,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:52,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:52,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:52,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:52,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:52,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:52,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/79b8a2273fb34656b129eb4966b46c03 is 50, key is test_row_0/A:col10/1733801632589/Put/seqid=0 2024-12-10T03:33:52,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801692607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801692608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801692608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801692609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801692609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741932_1108 (size=12301) 2024-12-10T03:33:52,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801692710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801692711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801692711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801692712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801692712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801692913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801692913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801692913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801692914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:52,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801692914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/79b8a2273fb34656b129eb4966b46c03 2024-12-10T03:33:53,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/64cd130bf3c24911bc506e4bdf85d77e is 50, key is test_row_0/B:col10/1733801632589/Put/seqid=0 2024-12-10T03:33:53,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741933_1109 (size=12301) 2024-12-10T03:33:53,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801693216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801693217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801693217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801693217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801693217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/64cd130bf3c24911bc506e4bdf85d77e 2024-12-10T03:33:53,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/f5f46b1715594cd496dd071bca5bc255 is 50, key is test_row_0/C:col10/1733801632589/Put/seqid=0 2024-12-10T03:33:53,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741934_1110 (size=12301) 2024-12-10T03:33:53,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801693734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801693734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801693734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,737 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801693734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801693734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:53,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/f5f46b1715594cd496dd071bca5bc255 2024-12-10T03:33:53,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/79b8a2273fb34656b129eb4966b46c03 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/79b8a2273fb34656b129eb4966b46c03 2024-12-10T03:33:53,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/79b8a2273fb34656b129eb4966b46c03, entries=150, sequenceid=460, filesize=12.0 K 2024-12-10T03:33:53,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/64cd130bf3c24911bc506e4bdf85d77e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/64cd130bf3c24911bc506e4bdf85d77e 2024-12-10T03:33:53,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/64cd130bf3c24911bc506e4bdf85d77e, entries=150, sequenceid=460, filesize=12.0 K 2024-12-10T03:33:53,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/f5f46b1715594cd496dd071bca5bc255 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f5f46b1715594cd496dd071bca5bc255 2024-12-10T03:33:53,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f5f46b1715594cd496dd071bca5bc255, entries=150, sequenceid=460, filesize=12.0 K 2024-12-10T03:33:53,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for c1d70420fe32ca56286cb5d739cf351d in 1273ms, sequenceid=460, compaction requested=true 2024-12-10T03:33:53,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:53,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:53,864 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:53,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:53,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:53,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:53,864 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:53,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:53,864 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:53,865 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:53,865 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor 
compaction (all files) 2024-12-10T03:33:53,865 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:53,865 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/3ad6ac936e8945c09218589c91c3606e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5c4bb1635b9f403887973cf2eff6c180, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/64cd130bf3c24911bc506e4bdf85d77e] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=37.0 K 2024-12-10T03:33:53,865 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:53,865 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:53,866 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:53,866 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1ede28bf76e04a12a776d5ce2c53f3e7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/37314e2237f444f8a17fe0369c75c255, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/79b8a2273fb34656b129eb4966b46c03] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=37.0 K 2024-12-10T03:33:53,866 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ad6ac936e8945c09218589c91c3606e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733801629821 2024-12-10T03:33:53,866 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ede28bf76e04a12a776d5ce2c53f3e7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733801629821 2024-12-10T03:33:53,866 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c4bb1635b9f403887973cf2eff6c180, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1733801630148 2024-12-10T03:33:53,866 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37314e2237f444f8a17fe0369c75c255, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1733801630148 2024-12-10T03:33:53,867 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 64cd130bf3c24911bc506e4bdf85d77e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733801632282 2024-12-10T03:33:53,867 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79b8a2273fb34656b129eb4966b46c03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733801632282 2024-12-10T03:33:53,873 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:53,874 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/e2e5f1245a3540dabb8799aca9c59d2c is 50, key is test_row_0/B:col10/1733801632589/Put/seqid=0 2024-12-10T03:33:53,874 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#97 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:53,874 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6d5272a2ab3b442f9f4075b2835680d2 is 50, key is test_row_0/A:col10/1733801632589/Put/seqid=0 2024-12-10T03:33:53,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741935_1111 (size=13391) 2024-12-10T03:33:53,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741936_1112 (size=13391) 2024-12-10T03:33:54,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T03:33:54,269 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-10T03:33:54,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-10T03:33:54,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T03:33:54,271 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:54,272 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:54,272 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:54,294 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/e2e5f1245a3540dabb8799aca9c59d2c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e2e5f1245a3540dabb8799aca9c59d2c 2024-12-10T03:33:54,295 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6d5272a2ab3b442f9f4075b2835680d2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6d5272a2ab3b442f9f4075b2835680d2 2024-12-10T03:33:54,300 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 
{}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into e2e5f1245a3540dabb8799aca9c59d2c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:54,301 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:54,301 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801633864; duration=0sec 2024-12-10T03:33:54,301 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 6d5272a2ab3b442f9f4075b2835680d2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:54,301 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:54,301 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:54,301 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801633864; duration=0sec 2024-12-10T03:33:54,301 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:54,301 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:54,302 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:54,302 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:54,302 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:54,302 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:54,302 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:54,303 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d7b9f5ed3bbd497599087305d589ffc9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/b19713258592463c95c9f6cc184cc890, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f5f46b1715594cd496dd071bca5bc255] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=37.0 K 2024-12-10T03:33:54,303 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d7b9f5ed3bbd497599087305d589ffc9, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=418, earliestPutTs=1733801629821 2024-12-10T03:33:54,304 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting b19713258592463c95c9f6cc184cc890, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=446, earliestPutTs=1733801630148 2024-12-10T03:33:54,304 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f5f46b1715594cd496dd071bca5bc255, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733801632282 2024-12-10T03:33:54,312 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:54,313 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/11b8f0e07c0f44e19c07abffc5a098c0 is 50, key is test_row_0/C:col10/1733801632589/Put/seqid=0 2024-12-10T03:33:54,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741937_1113 (size=13391) 2024-12-10T03:33:54,329 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/11b8f0e07c0f44e19c07abffc5a098c0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/11b8f0e07c0f44e19c07abffc5a098c0 2024-12-10T03:33:54,334 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 11b8f0e07c0f44e19c07abffc5a098c0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:54,334 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:54,334 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801633864; duration=0sec 2024-12-10T03:33:54,335 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:54,335 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:54,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T03:33:54,423 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-10T03:33:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:54,424 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T03:33:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:54,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:54,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/9da96bcf6bd147eb9389185b8835c056 is 50, key is test_row_0/A:col10/1733801632608/Put/seqid=0 2024-12-10T03:33:54,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741938_1114 (size=12301) 2024-12-10T03:33:54,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T03:33:54,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:54,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:54,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801694743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801694743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801694743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801694744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801694747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,832 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/9da96bcf6bd147eb9389185b8835c056 2024-12-10T03:33:54,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/387bd393d5244887a9689dbc9401e1fe is 50, key is test_row_0/B:col10/1733801632608/Put/seqid=0 2024-12-10T03:33:54,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741939_1115 (size=12301) 2024-12-10T03:33:54,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801694845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801694846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801694846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:54,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801694847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:54,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T03:33:55,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801695047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801695048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801695048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801695049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,246 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/387bd393d5244887a9689dbc9401e1fe 2024-12-10T03:33:55,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/c7bd196f3a174b22bb873669b2682ad5 is 50, key is test_row_0/C:col10/1733801632608/Put/seqid=0 2024-12-10T03:33:55,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741940_1116 (size=12301) 2024-12-10T03:33:55,294 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/c7bd196f3a174b22bb873669b2682ad5 2024-12-10T03:33:55,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/9da96bcf6bd147eb9389185b8835c056 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9da96bcf6bd147eb9389185b8835c056 2024-12-10T03:33:55,311 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9da96bcf6bd147eb9389185b8835c056, entries=150, sequenceid=487, filesize=12.0 K 2024-12-10T03:33:55,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/387bd393d5244887a9689dbc9401e1fe as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/387bd393d5244887a9689dbc9401e1fe 2024-12-10T03:33:55,321 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/387bd393d5244887a9689dbc9401e1fe, entries=150, sequenceid=487, filesize=12.0 K 2024-12-10T03:33:55,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/c7bd196f3a174b22bb873669b2682ad5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c7bd196f3a174b22bb873669b2682ad5 2024-12-10T03:33:55,330 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c7bd196f3a174b22bb873669b2682ad5, entries=150, sequenceid=487, filesize=12.0 K 2024-12-10T03:33:55,332 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c1d70420fe32ca56286cb5d739cf351d in 908ms, sequenceid=487, compaction requested=false 2024-12-10T03:33:55,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:55,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
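Note: the repeated "Over memstore limit=512.0 K" entries above show HRegion.checkResources rejecting client Puts with RegionTooBusyException while the pid=25 flush drains the memstore. Below is a minimal client-side sketch (not taken from the test itself) of retrying a Put on that exception; the column value, retry count, and backoff policy are illustrative assumptions, and in practice the HBase client also retries such failures internally.

// Hedged sketch: retry a Put when the server answers with RegionTooBusyException.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key and column family "A" mirror the entries in this log; the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // may be rejected while the region's memstore is above the blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // Memstore over the limit; wait for the flush to complete, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
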
2024-12-10T03:33:55,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-10T03:33:55,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-10T03:33:55,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-10T03:33:55,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0610 sec 2024-12-10T03:33:55,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.0650 sec 2024-12-10T03:33:55,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:55,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:33:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:55,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:55,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/2ef440726eae438fab375e337570b60f is 50, key is test_row_0/A:col10/1733801635350/Put/seqid=0 2024-12-10T03:33:55,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741941_1117 (size=14741) 2024-12-10T03:33:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801695368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801695369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801695369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801695370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T03:33:55,374 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-10T03:33:55,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:55,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-10T03:33:55,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T03:33:55,378 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:55,378 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:55,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:55,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801695472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801695472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801695472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T03:33:55,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801695476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,530 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-10T03:33:55,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:55,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:55,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:55,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
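Note: the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed" entry and the FlushTableProcedure pid=26/27 entries above correspond to a client issuing an explicit table flush through the Admin API. A minimal sketch of that call follows; the connection setup matches the previous sketch, the table name comes from this log, and everything else is an assumption. When a region is already flushing, the remote FlushRegionCallable fails with the IOException shown above and the master re-dispatches the subprocedure (pid=27 appears more than once in this log for that reason).

// Hedged sketch: trigger the table flush that the FlushTableProcedure entries above record.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure for the table finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
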
2024-12-10T03:33:55,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:55,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:55,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801695675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801695675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801695675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T03:33:55,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801695679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,683 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-10T03:33:55,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:55,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:55,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:55,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:55,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:55,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:55,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/2ef440726eae438fab375e337570b60f 2024-12-10T03:33:55,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d272460a66a940da9adfd62ddf936a67 is 50, key is test_row_0/B:col10/1733801635350/Put/seqid=0 2024-12-10T03:33:55,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741942_1118 (size=12301) 2024-12-10T03:33:55,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d272460a66a940da9adfd62ddf936a67 2024-12-10T03:33:55,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/710aa0ea16754e968d5203ca31a9314a is 50, key is test_row_0/C:col10/1733801635350/Put/seqid=0 2024-12-10T03:33:55,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741943_1119 (size=12301) 2024-12-10T03:33:55,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=500 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/710aa0ea16754e968d5203ca31a9314a 2024-12-10T03:33:55,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/2ef440726eae438fab375e337570b60f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/2ef440726eae438fab375e337570b60f 2024-12-10T03:33:55,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/2ef440726eae438fab375e337570b60f, entries=200, sequenceid=500, filesize=14.4 K 2024-12-10T03:33:55,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d272460a66a940da9adfd62ddf936a67 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d272460a66a940da9adfd62ddf936a67 2024-12-10T03:33:55,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d272460a66a940da9adfd62ddf936a67, entries=150, sequenceid=500, filesize=12.0 K 2024-12-10T03:33:55,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/710aa0ea16754e968d5203ca31a9314a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/710aa0ea16754e968d5203ca31a9314a 2024-12-10T03:33:55,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/710aa0ea16754e968d5203ca31a9314a, entries=150, sequenceid=500, filesize=12.0 K 2024-12-10T03:33:55,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for c1d70420fe32ca56286cb5d739cf351d in 457ms, sequenceid=500, compaction requested=true 2024-12-10T03:33:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:55,808 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:55,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:55,808 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T03:33:55,809 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:55,809 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:55,809 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:55,809 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:55,809 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6d5272a2ab3b442f9f4075b2835680d2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9da96bcf6bd147eb9389185b8835c056, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/2ef440726eae438fab375e337570b60f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=39.5 K 2024-12-10T03:33:55,810 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d5272a2ab3b442f9f4075b2835680d2, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733801632282 2024-12-10T03:33:55,810 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9da96bcf6bd147eb9389185b8835c056, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1733801632606 2024-12-10T03:33:55,810 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:55,811 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:55,811 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:55,811 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e2e5f1245a3540dabb8799aca9c59d2c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/387bd393d5244887a9689dbc9401e1fe, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d272460a66a940da9adfd62ddf936a67] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=37.1 K 2024-12-10T03:33:55,811 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ef440726eae438fab375e337570b60f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733801634741 2024-12-10T03:33:55,811 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting e2e5f1245a3540dabb8799aca9c59d2c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733801632282 2024-12-10T03:33:55,812 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 387bd393d5244887a9689dbc9401e1fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1733801632606 2024-12-10T03:33:55,812 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d272460a66a940da9adfd62ddf936a67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733801634741 2024-12-10T03:33:55,819 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:55,819 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:55,820 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a89824c568f54aa99e4c53dca1a9627f is 50, key is test_row_0/A:col10/1733801635350/Put/seqid=0 2024-12-10T03:33:55,820 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/5d074dd8bebd4af4bd9b67e15eef415b is 50, key is test_row_0/B:col10/1733801635350/Put/seqid=0 2024-12-10T03:33:55,835 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-10T03:33:55,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:55,836 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T03:33:55,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:55,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:55,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:55,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:55,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:55,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741944_1120 (size=13493) 2024-12-10T03:33:55,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741945_1121 (size=13493) 2024-12-10T03:33:55,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/fe9d65daf8f642249a5398e1313e56ea is 50, key is test_row_0/A:col10/1733801635368/Put/seqid=0 2024-12-10T03:33:55,843 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/5d074dd8bebd4af4bd9b67e15eef415b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5d074dd8bebd4af4bd9b67e15eef415b 2024-12-10T03:33:55,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741946_1122 (size=12301) 2024-12-10T03:33:55,849 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into 5d074dd8bebd4af4bd9b67e15eef415b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:55,849 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:55,849 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801635808; duration=0sec 2024-12-10T03:33:55,849 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:55,849 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:55,849 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:55,850 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:55,850 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:55,850 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:55,851 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/11b8f0e07c0f44e19c07abffc5a098c0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c7bd196f3a174b22bb873669b2682ad5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/710aa0ea16754e968d5203ca31a9314a] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=37.1 K 2024-12-10T03:33:55,851 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 11b8f0e07c0f44e19c07abffc5a098c0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733801632282 2024-12-10T03:33:55,851 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c7bd196f3a174b22bb873669b2682ad5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1733801632606 2024-12-10T03:33:55,852 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 710aa0ea16754e968d5203ca31a9314a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733801634741 2024-12-10T03:33:55,858 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#108 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:55,858 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a1125882790344868201783b7dd8774e is 50, key is test_row_0/C:col10/1733801635350/Put/seqid=0 2024-12-10T03:33:55,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741947_1123 (size=13493) 2024-12-10T03:33:55,868 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a1125882790344868201783b7dd8774e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a1125882790344868201783b7dd8774e 2024-12-10T03:33:55,872 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into a1125882790344868201783b7dd8774e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:55,872 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:55,873 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801635808; duration=0sec 2024-12-10T03:33:55,873 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:55,873 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T03:33:55,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:55,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:55,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801695984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801695985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801695985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:55,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:55,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801695986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801696088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801696089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801696089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801696089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,242 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a89824c568f54aa99e4c53dca1a9627f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a89824c568f54aa99e4c53dca1a9627f 2024-12-10T03:33:56,247 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/fe9d65daf8f642249a5398e1313e56ea 2024-12-10T03:33:56,247 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into a89824c568f54aa99e4c53dca1a9627f(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:56,247 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:56,247 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801635808; duration=0sec 2024-12-10T03:33:56,248 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:56,248 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:56,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/4ac36cb2093948eabe926734fac0ff18 is 50, key is test_row_0/B:col10/1733801635368/Put/seqid=0 2024-12-10T03:33:56,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741948_1124 (size=12301) 2024-12-10T03:33:56,259 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/4ac36cb2093948eabe926734fac0ff18 2024-12-10T03:33:56,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5136547df5b4441f8e093bb09e3ffc99 is 50, key is test_row_0/C:col10/1733801635368/Put/seqid=0 2024-12-10T03:33:56,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741949_1125 (size=12301) 2024-12-10T03:33:56,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801696291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801696292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801696292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801696294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T03:33:56,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801696595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801696595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801696595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801696597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,671 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5136547df5b4441f8e093bb09e3ffc99 2024-12-10T03:33:56,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/fe9d65daf8f642249a5398e1313e56ea as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/fe9d65daf8f642249a5398e1313e56ea 2024-12-10T03:33:56,680 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/fe9d65daf8f642249a5398e1313e56ea, entries=150, sequenceid=523, filesize=12.0 K 2024-12-10T03:33:56,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/4ac36cb2093948eabe926734fac0ff18 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4ac36cb2093948eabe926734fac0ff18 2024-12-10T03:33:56,693 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4ac36cb2093948eabe926734fac0ff18, entries=150, sequenceid=523, filesize=12.0 K 2024-12-10T03:33:56,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5136547df5b4441f8e093bb09e3ffc99 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5136547df5b4441f8e093bb09e3ffc99 2024-12-10T03:33:56,697 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5136547df5b4441f8e093bb09e3ffc99, entries=150, sequenceid=523, filesize=12.0 K 2024-12-10T03:33:56,698 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c1d70420fe32ca56286cb5d739cf351d in 862ms, sequenceid=523, compaction requested=false 2024-12-10T03:33:56,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:56,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:56,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-10T03:33:56,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-10T03:33:56,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-10T03:33:56,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3210 sec 2024-12-10T03:33:56,702 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.3260 sec 2024-12-10T03:33:56,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:56,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:33:56,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:56,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:56,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:56,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:56,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:56,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:56,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a5f816a6411b43ef85b18b2c5e0719c1 is 50, key is test_row_0/A:col10/1733801635984/Put/seqid=0 2024-12-10T03:33:56,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741950_1126 (size=14741) 2024-12-10T03:33:56,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801696811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:56,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:56,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801696913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:57,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801697097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:57,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801697101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:57,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801697101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:57,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801697102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:57,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801697116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a5f816a6411b43ef85b18b2c5e0719c1 2024-12-10T03:33:57,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/24c1ca6663f144dbab5e1a9dec77d8ab is 50, key is test_row_0/B:col10/1733801635984/Put/seqid=0 2024-12-10T03:33:57,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741951_1127 (size=12301) 2024-12-10T03:33:57,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:57,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801697419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-10T03:33:57,481 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-10T03:33:57,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:57,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-10T03:33:57,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T03:33:57,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:57,484 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:57,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:57,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T03:33:57,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/24c1ca6663f144dbab5e1a9dec77d8ab 2024-12-10T03:33:57,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/834d71468662469c8886d7b1a74684fa is 50, key is test_row_0/C:col10/1733801635984/Put/seqid=0 2024-12-10T03:33:57,604 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741952_1128 (size=12301) 2024-12-10T03:33:57,604 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/834d71468662469c8886d7b1a74684fa 2024-12-10T03:33:57,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a5f816a6411b43ef85b18b2c5e0719c1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a5f816a6411b43ef85b18b2c5e0719c1 2024-12-10T03:33:57,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a5f816a6411b43ef85b18b2c5e0719c1, entries=200, sequenceid=540, filesize=14.4 K 2024-12-10T03:33:57,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/24c1ca6663f144dbab5e1a9dec77d8ab as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/24c1ca6663f144dbab5e1a9dec77d8ab 2024-12-10T03:33:57,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/24c1ca6663f144dbab5e1a9dec77d8ab, entries=150, sequenceid=540, filesize=12.0 K 2024-12-10T03:33:57,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/834d71468662469c8886d7b1a74684fa as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/834d71468662469c8886d7b1a74684fa 2024-12-10T03:33:57,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/834d71468662469c8886d7b1a74684fa, entries=150, sequenceid=540, filesize=12.0 K 2024-12-10T03:33:57,627 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c1d70420fe32ca56286cb5d739cf351d in 857ms, sequenceid=540, compaction requested=true 2024-12-10T03:33:57,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:57,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 
2024-12-10T03:33:57,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:57,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:57,628 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:57,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:57,628 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:57,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:57,628 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:57,629 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:57,629 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40535 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:57,629 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:57,629 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:57,629 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:57,629 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:57,629 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5d074dd8bebd4af4bd9b67e15eef415b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4ac36cb2093948eabe926734fac0ff18, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/24c1ca6663f144dbab5e1a9dec77d8ab] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=37.2 K 2024-12-10T03:33:57,629 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a89824c568f54aa99e4c53dca1a9627f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/fe9d65daf8f642249a5398e1313e56ea, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a5f816a6411b43ef85b18b2c5e0719c1] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=39.6 K 2024-12-10T03:33:57,630 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a89824c568f54aa99e4c53dca1a9627f, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733801634741 2024-12-10T03:33:57,630 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d074dd8bebd4af4bd9b67e15eef415b, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733801634741 2024-12-10T03:33:57,630 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe9d65daf8f642249a5398e1313e56ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1733801635365 2024-12-10T03:33:57,630 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ac36cb2093948eabe926734fac0ff18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1733801635365 2024-12-10T03:33:57,630 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5f816a6411b43ef85b18b2c5e0719c1, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733801635983 2024-12-10T03:33:57,630 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 24c1ca6663f144dbab5e1a9dec77d8ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733801635984 2024-12-10T03:33:57,635 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,636 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-10T03:33:57,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:57,636 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T03:33:57,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:57,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:57,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:57,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:57,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:57,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:57,639 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:57,639 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/7145bd37bc254c47956c9043011891a9 is 50, key is test_row_0/B:col10/1733801635984/Put/seqid=0 2024-12-10T03:33:57,641 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:57,642 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6f0375a0eef14400be0e8d2762285a2e is 50, key is test_row_0/A:col10/1733801635984/Put/seqid=0 2024-12-10T03:33:57,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a4d654a0a94e44e6bcb57192872d88b1 is 50, key is test_row_0/A:col10/1733801636787/Put/seqid=0 2024-12-10T03:33:57,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741954_1130 (size=13595) 2024-12-10T03:33:57,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741953_1129 (size=13595) 2024-12-10T03:33:57,669 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/7145bd37bc254c47956c9043011891a9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7145bd37bc254c47956c9043011891a9 2024-12-10T03:33:57,675 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into 7145bd37bc254c47956c9043011891a9(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:57,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:57,675 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801637627; duration=0sec 2024-12-10T03:33:57,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:57,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:57,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:57,676 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:57,676 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:57,676 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:57,676 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a1125882790344868201783b7dd8774e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5136547df5b4441f8e093bb09e3ffc99, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/834d71468662469c8886d7b1a74684fa] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=37.2 K 2024-12-10T03:33:57,677 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a1125882790344868201783b7dd8774e, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=500, earliestPutTs=1733801634741 2024-12-10T03:33:57,677 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5136547df5b4441f8e093bb09e3ffc99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1733801635365 2024-12-10T03:33:57,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741955_1131 (size=12301) 2024-12-10T03:33:57,678 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 834d71468662469c8886d7b1a74684fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=540, earliestPutTs=1733801635984 2024-12-10T03:33:57,681 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=562 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a4d654a0a94e44e6bcb57192872d88b1 2024-12-10T03:33:57,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1c1e93bff1e14032a81c606559b1d9de is 50, key is test_row_0/B:col10/1733801636787/Put/seqid=0 2024-12-10T03:33:57,689 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#118 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:57,690 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5a1b829f95f441299f5a4b3dc2e36a45 is 50, key is test_row_0/C:col10/1733801635984/Put/seqid=0 2024-12-10T03:33:57,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741956_1132 (size=12301) 2024-12-10T03:33:57,697 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=562 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1c1e93bff1e14032a81c606559b1d9de 2024-12-10T03:33:57,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741957_1133 (size=13595) 2024-12-10T03:33:57,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/425a75fc10b24ade820e946e5a9896f5 is 50, key is test_row_0/C:col10/1733801636787/Put/seqid=0 2024-12-10T03:33:57,709 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/5a1b829f95f441299f5a4b3dc2e36a45 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5a1b829f95f441299f5a4b3dc2e36a45 2024-12-10T03:33:57,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741958_1134 (size=12301) 2024-12-10T03:33:57,713 INFO 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=562 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/425a75fc10b24ade820e946e5a9896f5 2024-12-10T03:33:57,714 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 5a1b829f95f441299f5a4b3dc2e36a45(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:57,714 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:57,714 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801637628; duration=0sec 2024-12-10T03:33:57,714 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:57,714 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:57,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/a4d654a0a94e44e6bcb57192872d88b1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a4d654a0a94e44e6bcb57192872d88b1 2024-12-10T03:33:57,721 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a4d654a0a94e44e6bcb57192872d88b1, entries=150, sequenceid=562, filesize=12.0 K 2024-12-10T03:33:57,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/1c1e93bff1e14032a81c606559b1d9de as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1c1e93bff1e14032a81c606559b1d9de 2024-12-10T03:33:57,726 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1c1e93bff1e14032a81c606559b1d9de, entries=150, sequenceid=562, filesize=12.0 K 2024-12-10T03:33:57,727 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/425a75fc10b24ade820e946e5a9896f5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/425a75fc10b24ade820e946e5a9896f5 2024-12-10T03:33:57,731 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/425a75fc10b24ade820e946e5a9896f5, entries=150, sequenceid=562, filesize=12.0 K 2024-12-10T03:33:57,732 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for c1d70420fe32ca56286cb5d739cf351d in 96ms, sequenceid=562, compaction requested=false 2024-12-10T03:33:57,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:57,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:57,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-10T03:33:57,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-10T03:33:57,734 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-10T03:33:57,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 249 msec 2024-12-10T03:33:57,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 253 msec 2024-12-10T03:33:57,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T03:33:57,785 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-10T03:33:57,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:33:57,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-10T03:33:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 
2024-12-10T03:33:57,788 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:33:57,788 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:33:57,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:33:57,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T03:33:57,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:57,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:33:57,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:57,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:57,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:57,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:57,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:57,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:57,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/0cca58a95aa741418aafc1a34f0f91d7 is 50, key is test_row_0/A:col10/1733801637932/Put/seqid=0 2024-12-10T03:33:57,939 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:57,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-10T03:33:57,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:57,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
as already flushing 2024-12-10T03:33:57,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:57,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:57,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:57,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741959_1135 (size=14737) 2024-12-10T03:33:57,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:57,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=575 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/0cca58a95aa741418aafc1a34f0f91d7 2024-12-10T03:33:57,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/ea7e2ffbcde84d1eacffe580c06d4b86 is 50, key is test_row_0/B:col10/1733801637932/Put/seqid=0 2024-12-10T03:33:57,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741960_1136 (size=9857) 2024-12-10T03:33:57,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801697975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,059 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6f0375a0eef14400be0e8d2762285a2e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6f0375a0eef14400be0e8d2762285a2e 2024-12-10T03:33:58,065 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into 6f0375a0eef14400be0e8d2762285a2e(size=13.3 K), total size for store is 25.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:58,065 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:58,065 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801637627; duration=0sec 2024-12-10T03:33:58,065 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:58,065 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:58,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:58,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801698077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T03:33:58,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-10T03:33:58,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:58,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49190 deadline: 1733801698104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49162 deadline: 1733801698104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:58,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49168 deadline: 1733801698104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:58,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49180 deadline: 1733801698112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-10T03:33:58,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:58,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:58,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801698279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=575 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/ea7e2ffbcde84d1eacffe580c06d4b86 2024-12-10T03:33:58,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/21b724172cfd4ab3b720f993d355928e is 50, key is test_row_0/C:col10/1733801637932/Put/seqid=0 2024-12-10T03:33:58,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741961_1137 (size=9857) 2024-12-10T03:33:58,390 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T03:33:58,397 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-10T03:33:58,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:58,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-10T03:33:58,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:58,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:58,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:58,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:33:58,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49174 deadline: 1733801698581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,702 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-10T03:33:58,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:58,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:33:58,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:33:58,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=575 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/21b724172cfd4ab3b720f993d355928e 2024-12-10T03:33:58,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/0cca58a95aa741418aafc1a34f0f91d7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/0cca58a95aa741418aafc1a34f0f91d7 2024-12-10T03:33:58,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/0cca58a95aa741418aafc1a34f0f91d7, entries=200, sequenceid=575, filesize=14.4 K 2024-12-10T03:33:58,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/ea7e2ffbcde84d1eacffe580c06d4b86 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/ea7e2ffbcde84d1eacffe580c06d4b86 2024-12-10T03:33:58,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/ea7e2ffbcde84d1eacffe580c06d4b86, entries=100, sequenceid=575, filesize=9.6 K 2024-12-10T03:33:58,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/21b724172cfd4ab3b720f993d355928e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/21b724172cfd4ab3b720f993d355928e 2024-12-10T03:33:58,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/21b724172cfd4ab3b720f993d355928e, entries=100, sequenceid=575, filesize=9.6 K 2024-12-10T03:33:58,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c1d70420fe32ca56286cb5d739cf351d in 852ms, sequenceid=575, compaction requested=true 2024-12-10T03:33:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
c1d70420fe32ca56286cb5d739cf351d:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:33:58,785 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:33:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:58,785 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1d70420fe32ca56286cb5d739cf351d:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:33:58,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:58,786 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40633 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:58,786 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:58,786 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/B is initiating minor compaction (all files) 2024-12-10T03:33:58,786 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/A is initiating minor compaction (all files) 2024-12-10T03:33:58,786 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/B in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:58,786 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/A in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:58,787 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7145bd37bc254c47956c9043011891a9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1c1e93bff1e14032a81c606559b1d9de, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/ea7e2ffbcde84d1eacffe580c06d4b86] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=34.9 K 2024-12-10T03:33:58,787 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6f0375a0eef14400be0e8d2762285a2e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a4d654a0a94e44e6bcb57192872d88b1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/0cca58a95aa741418aafc1a34f0f91d7] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=39.7 K 2024-12-10T03:33:58,787 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f0375a0eef14400be0e8d2762285a2e, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733801635984 2024-12-10T03:33:58,787 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7145bd37bc254c47956c9043011891a9, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733801635984 2024-12-10T03:33:58,787 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c1e93bff1e14032a81c606559b1d9de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=562, earliestPutTs=1733801636787 2024-12-10T03:33:58,787 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4d654a0a94e44e6bcb57192872d88b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=562, earliestPutTs=1733801636787 2024-12-10T03:33:58,788 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ea7e2ffbcde84d1eacffe580c06d4b86, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=575, earliestPutTs=1733801637929 2024-12-10T03:33:58,788 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cca58a95aa741418aafc1a34f0f91d7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=575, earliestPutTs=1733801637927 2024-12-10T03:33:58,794 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#A#compaction#123 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:58,794 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#B#compaction#124 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:58,794 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/e13c54bf3a574853908351845b1cfd24 is 50, key is test_row_0/A:col10/1733801637932/Put/seqid=0 2024-12-10T03:33:58,794 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d53e8cd3d9034babb2ce8183318b387a is 50, key is test_row_0/B:col10/1733801637932/Put/seqid=0 2024-12-10T03:33:58,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741963_1139 (size=13697) 2024-12-10T03:33:58,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741962_1138 (size=13697) 2024-12-10T03:33:58,854 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:33:58,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-10T03:33:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:58,855 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:33:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:33:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:33:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:33:58,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:33:58,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6dae3a367aca4c4ba544a1551c5a583a is 50, key is test_row_0/A:col10/1733801637970/Put/seqid=0 2024-12-10T03:33:58,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741964_1140 (size=12301) 2024-12-10T03:33:58,865 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=601 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6dae3a367aca4c4ba544a1551c5a583a 2024-12-10T03:33:58,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/7592a8c0000648e0a6235e8f5fa07e66 is 50, key is test_row_0/B:col10/1733801637970/Put/seqid=0 2024-12-10T03:33:58,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741965_1141 (size=12301) 2024-12-10T03:33:58,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T03:33:59,065 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:51621 2024-12-10T03:33:59,065 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1d2a8e08 to 127.0.0.1:51621 2024-12-10T03:33:59,065 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:51621 2024-12-10T03:33:59,066 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:33:59,066 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:33:59,066 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:33:59,068 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:51621 2024-12-10T03:33:59,068 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:33:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:33:59,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. as already flushing 2024-12-10T03:33:59,087 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:51621 2024-12-10T03:33:59,087 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:33:59,217 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/e13c54bf3a574853908351845b1cfd24 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/e13c54bf3a574853908351845b1cfd24 2024-12-10T03:33:59,220 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/d53e8cd3d9034babb2ce8183318b387a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d53e8cd3d9034babb2ce8183318b387a 2024-12-10T03:33:59,224 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/A of c1d70420fe32ca56286cb5d739cf351d into e13c54bf3a574853908351845b1cfd24(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:59,224 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:59,224 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/A, priority=13, startTime=1733801638785; duration=0sec 2024-12-10T03:33:59,225 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:33:59,225 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:A 2024-12-10T03:33:59,225 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:33:59,226 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35753 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:33:59,226 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): c1d70420fe32ca56286cb5d739cf351d/C is initiating minor compaction (all files) 2024-12-10T03:33:59,226 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c1d70420fe32ca56286cb5d739cf351d/C in TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:33:59,226 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5a1b829f95f441299f5a4b3dc2e36a45, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/425a75fc10b24ade820e946e5a9896f5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/21b724172cfd4ab3b720f993d355928e] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp, totalSize=34.9 K 2024-12-10T03:33:59,227 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a1b829f95f441299f5a4b3dc2e36a45, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1733801635984 2024-12-10T03:33:59,227 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/B of c1d70420fe32ca56286cb5d739cf351d into d53e8cd3d9034babb2ce8183318b387a(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:33:59,227 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:59,227 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/B, priority=13, startTime=1733801638785; duration=0sec 2024-12-10T03:33:59,227 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 425a75fc10b24ade820e946e5a9896f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=562, earliestPutTs=1733801636787 2024-12-10T03:33:59,227 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:59,227 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:B 2024-12-10T03:33:59,228 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21b724172cfd4ab3b720f993d355928e, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=575, earliestPutTs=1733801637929 2024-12-10T03:33:59,235 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1d70420fe32ca56286cb5d739cf351d#C#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:33:59,236 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/04d0da7d7f724fd4bec9c9b937f00e58 is 50, key is test_row_0/C:col10/1733801637932/Put/seqid=0 2024-12-10T03:33:59,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741966_1142 (size=13697) 2024-12-10T03:33:59,279 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=601 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/7592a8c0000648e0a6235e8f5fa07e66 2024-12-10T03:33:59,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7ac3712a3bea4c77a30b5cfa65b6f743 is 50, key is test_row_0/C:col10/1733801637970/Put/seqid=0 2024-12-10T03:33:59,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741967_1143 (size=12301) 2024-12-10T03:33:59,372 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See 
HBASE-27595 for details. 2024-12-10T03:33:59,651 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/04d0da7d7f724fd4bec9c9b937f00e58 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/04d0da7d7f724fd4bec9c9b937f00e58 2024-12-10T03:33:59,658 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c1d70420fe32ca56286cb5d739cf351d/C of c1d70420fe32ca56286cb5d739cf351d into 04d0da7d7f724fd4bec9c9b937f00e58(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:33:59,658 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:59,658 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d., storeName=c1d70420fe32ca56286cb5d739cf351d/C, priority=13, startTime=1733801638785; duration=0sec 2024-12-10T03:33:59,659 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:33:59,659 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1d70420fe32ca56286cb5d739cf351d:C 2024-12-10T03:33:59,691 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=601 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7ac3712a3bea4c77a30b5cfa65b6f743 2024-12-10T03:33:59,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/6dae3a367aca4c4ba544a1551c5a583a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6dae3a367aca4c4ba544a1551c5a583a 2024-12-10T03:33:59,701 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6dae3a367aca4c4ba544a1551c5a583a, entries=150, sequenceid=601, filesize=12.0 K 2024-12-10T03:33:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/7592a8c0000648e0a6235e8f5fa07e66 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7592a8c0000648e0a6235e8f5fa07e66 2024-12-10T03:33:59,707 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7592a8c0000648e0a6235e8f5fa07e66, entries=150, sequenceid=601, filesize=12.0 K 2024-12-10T03:33:59,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/7ac3712a3bea4c77a30b5cfa65b6f743 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ac3712a3bea4c77a30b5cfa65b6f743 2024-12-10T03:33:59,712 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ac3712a3bea4c77a30b5cfa65b6f743, entries=150, sequenceid=601, filesize=12.0 K 2024-12-10T03:33:59,713 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=6.71 KB/6870 for c1d70420fe32ca56286cb5d739cf351d in 858ms, sequenceid=601, compaction requested=false 2024-12-10T03:33:59,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:33:59,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:33:59,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-10T03:33:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-10T03:33:59,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-10T03:33:59,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9260 sec 2024-12-10T03:33:59,717 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.9300 sec 2024-12-10T03:33:59,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-10T03:33:59,893 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-10T03:34:00,108 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:51621 2024-12-10T03:34:00,108 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:00,115 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:51621 2024-12-10T03:34:00,115 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:00,116 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:51621 2024-12-10T03:34:00,116 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:00,120 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:51621 2024-12-10T03:34:00,120 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:00,120 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T03:34:00,120 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-12-10T03:34:00,120 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-12-10T03:34:00,120 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 135 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8216 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8186 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3730 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11190 rows 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3747 2024-12-10T03:34:00,121 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11241 rows 2024-12-10T03:34:00,121 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T03:34:00,121 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:51621 2024-12-10T03:34:00,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:00,125 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T03:34:00,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T03:34:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:00,134 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801640133"}]},"ts":"1733801640133"} 2024-12-10T03:34:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-10T03:34:00,135 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T03:34:00,160 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T03:34:00,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:34:00,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1d70420fe32ca56286cb5d739cf351d, UNASSIGN}] 2024-12-10T03:34:00,166 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=c1d70420fe32ca56286cb5d739cf351d, UNASSIGN 2024-12-10T03:34:00,166 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=c1d70420fe32ca56286cb5d739cf351d, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:00,167 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:34:00,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-10T03:34:00,323 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:00,326 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:34:00,327 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:34:00,329 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing c1d70420fe32ca56286cb5d739cf351d, disabling compactions & flushes 2024-12-10T03:34:00,329 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:34:00,329 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:34:00,329 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. after waiting 0 ms 2024-12-10T03:34:00,329 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 
2024-12-10T03:34:00,329 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing c1d70420fe32ca56286cb5d739cf351d 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T03:34:00,329 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=A 2024-12-10T03:34:00,330 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:00,330 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=B 2024-12-10T03:34:00,330 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:00,330 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c1d70420fe32ca56286cb5d739cf351d, store=C 2024-12-10T03:34:00,330 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:00,338 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/f1bdbabaeec2406babdab8937721169d is 50, key is test_row_1/A:col10/1733801640115/Put/seqid=0 2024-12-10T03:34:00,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741968_1144 (size=9857) 2024-12-10T03:34:00,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-10T03:34:00,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-10T03:34:00,743 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=612 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/f1bdbabaeec2406babdab8937721169d 2024-12-10T03:34:00,756 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/2db5e7e2f37d422eb71f735155ef0cce is 50, key is test_row_1/B:col10/1733801640115/Put/seqid=0 2024-12-10T03:34:00,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741969_1145 (size=9857) 2024-12-10T03:34:01,162 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 
{event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=612 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/2db5e7e2f37d422eb71f735155ef0cce 2024-12-10T03:34:01,177 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a8b46ac921c043dea38411f91e31e400 is 50, key is test_row_1/C:col10/1733801640115/Put/seqid=0 2024-12-10T03:34:01,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741970_1146 (size=9857) 2024-12-10T03:34:01,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-10T03:34:01,584 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=612 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a8b46ac921c043dea38411f91e31e400 2024-12-10T03:34:01,597 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/A/f1bdbabaeec2406babdab8937721169d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f1bdbabaeec2406babdab8937721169d 2024-12-10T03:34:01,601 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f1bdbabaeec2406babdab8937721169d, entries=100, sequenceid=612, filesize=9.6 K 2024-12-10T03:34:01,602 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/B/2db5e7e2f37d422eb71f735155ef0cce as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/2db5e7e2f37d422eb71f735155ef0cce 2024-12-10T03:34:01,606 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/2db5e7e2f37d422eb71f735155ef0cce, entries=100, sequenceid=612, filesize=9.6 K 2024-12-10T03:34:01,606 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/.tmp/C/a8b46ac921c043dea38411f91e31e400 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a8b46ac921c043dea38411f91e31e400 2024-12-10T03:34:01,610 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a8b46ac921c043dea38411f91e31e400, entries=100, sequenceid=612, filesize=9.6 K 2024-12-10T03:34:01,611 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for c1d70420fe32ca56286cb5d739cf351d in 1282ms, sequenceid=612, compaction requested=true 2024-12-10T03:34:01,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/df0e075df11542689ac4a29fc43a6731, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/78d8c89704af4b15af2936c0c535197d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/17eec1e7013246c49b384251cef36489, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/ddd099b582b74bd29a8bc7e3427c84f0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/bc8da914fca4457c92e7983a5fc00fa1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/069b84c7755b41b0bd78b0538f7a2c4b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9143b97d11674514804c851fb8bc4504, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/51a1cf09f9ae4aa6a4f522c188937e49, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/77c819587a3740e391ca45a80ace6cb4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/06f9b82e0ecb457282078b8da0d7f577, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/3186e7dd80c04f13afdb9f0e3de63c0a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6cb9809decda4d2eb5543554ebe5889f, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1cbaa9a6a4f446f687dce25ee8eb1dda, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/baa87e3b8bcc4822820dfedc78654dc9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/327f95038a4b4f30806aaba66afb0827, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/4fb6eab13a4a4d51b1c842200bf2109c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6dc20672ac145c88f2b513baf929621, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/44df4f200f36407e89a63a856d6d023a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a408d820e123434c85da0d1242966669, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d21d6e036e82432eaf82f346dcbd0726, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b5a13ddc259a4571ae238cfeabe11a40, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/c64a74b25ce043908d88fc60838c8d9b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6d876bdc36849ecb460972db335f2ca, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/92fd359d7c7646d7933106fa7d31d243, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/36ac2bf15bff4177a70b1654c43e2446, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/48e263bc315242ad917c579c2f261d5f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d8574480e8b444fa97ec0194d55df7f3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f9045a348e104e148d4cb250c5afab48, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1ede28bf76e04a12a776d5ce2c53f3e7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/af870d9ea93f4110bc5816c3ca7846ca, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/37314e2237f444f8a17fe0369c75c255, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6d5272a2ab3b442f9f4075b2835680d2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/79b8a2273fb34656b129eb4966b46c03, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9da96bcf6bd147eb9389185b8835c056, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/2ef440726eae438fab375e337570b60f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a89824c568f54aa99e4c53dca1a9627f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/fe9d65daf8f642249a5398e1313e56ea, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a5f816a6411b43ef85b18b2c5e0719c1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6f0375a0eef14400be0e8d2762285a2e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a4d654a0a94e44e6bcb57192872d88b1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/0cca58a95aa741418aafc1a34f0f91d7] to archive 2024-12-10T03:34:01,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
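[editor's note] The StoreCloser records above hand the region's compacted store files for family A to HFileArchiver, and the "Archived from FileableStoreFile, <src> to <dst>" lines that follow show each file being moved from .../data/default/TestAcidGuarantees/<region>/<family>/<file> to the mirrored .../archive/data/default/TestAcidGuarantees/<region>/<family>/<file> location. The sketch below only illustrates that path mapping as it appears in these log lines; it is not the HBase HFileArchiver implementation, and the class name, method name, and use of java.net.URI are assumptions made for the example.

// Minimal sketch (assumption: every source path contains a single "/data/"
// root component, and the archive mirror simply prefixes it with "/archive").
import java.net.URI;

public final class ArchivePathSketch {
    static String toArchivePath(String storeFilePath) {
        URI uri = URI.create(storeFilePath);
        String path = uri.getPath();                  // e.g. /user/.../data/default/TestAcidGuarantees/<region>/A/<file>
        int idx = path.indexOf("/data/");
        if (idx < 0) {
            throw new IllegalArgumentException("not under a /data/ root: " + storeFilePath);
        }
        // Insert "archive" in front of the data/ component, keeping the remainder of the path intact.
        String archived = path.substring(0, idx) + "/archive" + path.substring(idx);
        return uri.getScheme() + "://" + uri.getAuthority() + archived;
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a"
                + "/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/df0e075df11542689ac4a29fc43a6731";
        // Prints the same destination that the HFileArchiver-1 record immediately below reports for this file.
        System.out.println(toArchivePath(src));
    }
}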
2024-12-10T03:34:01,621 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/df0e075df11542689ac4a29fc43a6731 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/df0e075df11542689ac4a29fc43a6731 2024-12-10T03:34:01,621 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/bc8da914fca4457c92e7983a5fc00fa1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/bc8da914fca4457c92e7983a5fc00fa1 2024-12-10T03:34:01,621 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9143b97d11674514804c851fb8bc4504 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9143b97d11674514804c851fb8bc4504 2024-12-10T03:34:01,621 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/17eec1e7013246c49b384251cef36489 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/17eec1e7013246c49b384251cef36489 2024-12-10T03:34:01,621 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/78d8c89704af4b15af2936c0c535197d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/78d8c89704af4b15af2936c0c535197d 2024-12-10T03:34:01,621 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/ddd099b582b74bd29a8bc7e3427c84f0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/ddd099b582b74bd29a8bc7e3427c84f0 2024-12-10T03:34:01,622 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/51a1cf09f9ae4aa6a4f522c188937e49 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/51a1cf09f9ae4aa6a4f522c188937e49 2024-12-10T03:34:01,622 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/069b84c7755b41b0bd78b0538f7a2c4b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/069b84c7755b41b0bd78b0538f7a2c4b 2024-12-10T03:34:01,623 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/baa87e3b8bcc4822820dfedc78654dc9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/baa87e3b8bcc4822820dfedc78654dc9 2024-12-10T03:34:01,623 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/77c819587a3740e391ca45a80ace6cb4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/77c819587a3740e391ca45a80ace6cb4 2024-12-10T03:34:01,624 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/3186e7dd80c04f13afdb9f0e3de63c0a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/3186e7dd80c04f13afdb9f0e3de63c0a 2024-12-10T03:34:01,624 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1cbaa9a6a4f446f687dce25ee8eb1dda to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1cbaa9a6a4f446f687dce25ee8eb1dda 2024-12-10T03:34:01,624 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/327f95038a4b4f30806aaba66afb0827 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/327f95038a4b4f30806aaba66afb0827 2024-12-10T03:34:01,624 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6cb9809decda4d2eb5543554ebe5889f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6cb9809decda4d2eb5543554ebe5889f 2024-12-10T03:34:01,624 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/06f9b82e0ecb457282078b8da0d7f577 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/06f9b82e0ecb457282078b8da0d7f577 2024-12-10T03:34:01,624 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/4fb6eab13a4a4d51b1c842200bf2109c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/4fb6eab13a4a4d51b1c842200bf2109c 2024-12-10T03:34:01,625 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/44df4f200f36407e89a63a856d6d023a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/44df4f200f36407e89a63a856d6d023a 2024-12-10T03:34:01,626 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d21d6e036e82432eaf82f346dcbd0726 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d21d6e036e82432eaf82f346dcbd0726 2024-12-10T03:34:01,626 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6dc20672ac145c88f2b513baf929621 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6dc20672ac145c88f2b513baf929621 2024-12-10T03:34:01,626 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/c64a74b25ce043908d88fc60838c8d9b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/c64a74b25ce043908d88fc60838c8d9b 2024-12-10T03:34:01,627 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a408d820e123434c85da0d1242966669 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a408d820e123434c85da0d1242966669 2024-12-10T03:34:01,627 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b5a13ddc259a4571ae238cfeabe11a40 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b5a13ddc259a4571ae238cfeabe11a40 2024-12-10T03:34:01,628 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/92fd359d7c7646d7933106fa7d31d243 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/92fd359d7c7646d7933106fa7d31d243 2024-12-10T03:34:01,628 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6d876bdc36849ecb460972db335f2ca to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/b6d876bdc36849ecb460972db335f2ca 2024-12-10T03:34:01,628 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/36ac2bf15bff4177a70b1654c43e2446 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/36ac2bf15bff4177a70b1654c43e2446 2024-12-10T03:34:01,629 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/48e263bc315242ad917c579c2f261d5f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/48e263bc315242ad917c579c2f261d5f 2024-12-10T03:34:01,629 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f9045a348e104e148d4cb250c5afab48 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f9045a348e104e148d4cb250c5afab48 2024-12-10T03:34:01,629 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d8574480e8b444fa97ec0194d55df7f3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/d8574480e8b444fa97ec0194d55df7f3 2024-12-10T03:34:01,630 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1ede28bf76e04a12a776d5ce2c53f3e7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/1ede28bf76e04a12a776d5ce2c53f3e7 2024-12-10T03:34:01,630 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/af870d9ea93f4110bc5816c3ca7846ca to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/af870d9ea93f4110bc5816c3ca7846ca 2024-12-10T03:34:01,630 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/37314e2237f444f8a17fe0369c75c255 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/37314e2237f444f8a17fe0369c75c255 2024-12-10T03:34:01,630 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6d5272a2ab3b442f9f4075b2835680d2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6d5272a2ab3b442f9f4075b2835680d2 2024-12-10T03:34:01,631 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/79b8a2273fb34656b129eb4966b46c03 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/79b8a2273fb34656b129eb4966b46c03 2024-12-10T03:34:01,632 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9da96bcf6bd147eb9389185b8835c056 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/9da96bcf6bd147eb9389185b8835c056 2024-12-10T03:34:01,632 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a89824c568f54aa99e4c53dca1a9627f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a89824c568f54aa99e4c53dca1a9627f 2024-12-10T03:34:01,632 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/2ef440726eae438fab375e337570b60f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/2ef440726eae438fab375e337570b60f 2024-12-10T03:34:01,633 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/fe9d65daf8f642249a5398e1313e56ea to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/fe9d65daf8f642249a5398e1313e56ea 2024-12-10T03:34:01,633 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a4d654a0a94e44e6bcb57192872d88b1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a4d654a0a94e44e6bcb57192872d88b1 2024-12-10T03:34:01,633 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a5f816a6411b43ef85b18b2c5e0719c1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/a5f816a6411b43ef85b18b2c5e0719c1 2024-12-10T03:34:01,633 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6f0375a0eef14400be0e8d2762285a2e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6f0375a0eef14400be0e8d2762285a2e 2024-12-10T03:34:01,633 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/0cca58a95aa741418aafc1a34f0f91d7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/0cca58a95aa741418aafc1a34f0f91d7 2024-12-10T03:34:01,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/89e69143874a4e8f839544c7cfe019bb, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/80c1555cb9a948dcad06d77143ebe700, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c4aef55db7cf4d428f00c29042545cc9, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5aa08b6dd5004d8080d47788a72cb796, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e145990b64fc475d97193ab61b9a0802, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/808f0196a774491a9e3076cfddab9493, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bf187ee731e54de88f1b6a8575e1ce29, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/00d5bf6baaf34081a044b60f21978e16, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d709e98dce944ff3bed776361880c4aa, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1bd5f830b5744c6fa9a93245559ddbcc, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c45a7e6cd8594885ab6f9f2451b66c93, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d75abf6e0b054bc28767eab3ccde9f7d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bc5a8c45cab74379a6ec5ca8adc30b82, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4a54ce9331d341a2a354ff4f96aae174, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/edd6b446f60d4219927f6b9a211f83ad, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a8fb61179e5f4d1480b91ee4b38c0bc3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/8ebb0db620c24944b81afc3f447441f2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f4b0050703194dcab940199c071eb782, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f620d060b0f745c5888faaf1e1361542, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d7a0d1aeb0c647049d4b4ef80eb0b8df, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1cd152e352c549ec8ef3cd45a7096146, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a282ff605ed04d2c8ab73ed2bd1ceb7e, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c31bc6b1d4e346448eb635ca8f2c758a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c5ab85cfefe44e75a58f6282da18c665, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/722d9e8dc9c04b7fb0b47b0797f42dce, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1b5d1327e0a94dff91308c91d0a3b906, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/775c7baad6d84011a4e6fdc52228d613, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/056b7fc6f0314ea2bab85e4f97bd6143, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/3ad6ac936e8945c09218589c91c3606e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/34ebc19078b146e68f76f35dece0498f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5c4bb1635b9f403887973cf2eff6c180, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e2e5f1245a3540dabb8799aca9c59d2c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/64cd130bf3c24911bc506e4bdf85d77e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/387bd393d5244887a9689dbc9401e1fe, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5d074dd8bebd4af4bd9b67e15eef415b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d272460a66a940da9adfd62ddf936a67, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4ac36cb2093948eabe926734fac0ff18, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7145bd37bc254c47956c9043011891a9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/24c1ca6663f144dbab5e1a9dec77d8ab, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1c1e93bff1e14032a81c606559b1d9de, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/ea7e2ffbcde84d1eacffe580c06d4b86] to archive 2024-12-10T03:34:01,647 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:34:01,663 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/89e69143874a4e8f839544c7cfe019bb to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/89e69143874a4e8f839544c7cfe019bb 2024-12-10T03:34:01,663 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c4aef55db7cf4d428f00c29042545cc9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c4aef55db7cf4d428f00c29042545cc9 2024-12-10T03:34:01,663 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/00d5bf6baaf34081a044b60f21978e16 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/00d5bf6baaf34081a044b60f21978e16 2024-12-10T03:34:01,663 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bf187ee731e54de88f1b6a8575e1ce29 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bf187ee731e54de88f1b6a8575e1ce29 2024-12-10T03:34:01,663 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/80c1555cb9a948dcad06d77143ebe700 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/80c1555cb9a948dcad06d77143ebe700 2024-12-10T03:34:01,664 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e145990b64fc475d97193ab61b9a0802 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e145990b64fc475d97193ab61b9a0802 2024-12-10T03:34:01,664 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5aa08b6dd5004d8080d47788a72cb796 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5aa08b6dd5004d8080d47788a72cb796 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d709e98dce944ff3bed776361880c4aa to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d709e98dce944ff3bed776361880c4aa 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c45a7e6cd8594885ab6f9f2451b66c93 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c45a7e6cd8594885ab6f9f2451b66c93 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/808f0196a774491a9e3076cfddab9493 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/808f0196a774491a9e3076cfddab9493 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d75abf6e0b054bc28767eab3ccde9f7d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d75abf6e0b054bc28767eab3ccde9f7d 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bc5a8c45cab74379a6ec5ca8adc30b82 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/bc5a8c45cab74379a6ec5ca8adc30b82 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1bd5f830b5744c6fa9a93245559ddbcc to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1bd5f830b5744c6fa9a93245559ddbcc 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4a54ce9331d341a2a354ff4f96aae174 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4a54ce9331d341a2a354ff4f96aae174 2024-12-10T03:34:01,665 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/edd6b446f60d4219927f6b9a211f83ad to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/edd6b446f60d4219927f6b9a211f83ad 2024-12-10T03:34:01,666 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a8fb61179e5f4d1480b91ee4b38c0bc3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a8fb61179e5f4d1480b91ee4b38c0bc3 2024-12-10T03:34:01,667 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d7a0d1aeb0c647049d4b4ef80eb0b8df to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d7a0d1aeb0c647049d4b4ef80eb0b8df 2024-12-10T03:34:01,667 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/8ebb0db620c24944b81afc3f447441f2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/8ebb0db620c24944b81afc3f447441f2 2024-12-10T03:34:01,667 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c31bc6b1d4e346448eb635ca8f2c758a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c31bc6b1d4e346448eb635ca8f2c758a 2024-12-10T03:34:01,667 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f620d060b0f745c5888faaf1e1361542 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f620d060b0f745c5888faaf1e1361542 2024-12-10T03:34:01,667 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1cd152e352c549ec8ef3cd45a7096146 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1cd152e352c549ec8ef3cd45a7096146 2024-12-10T03:34:01,667 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f4b0050703194dcab940199c071eb782 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/f4b0050703194dcab940199c071eb782 2024-12-10T03:34:01,668 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a282ff605ed04d2c8ab73ed2bd1ceb7e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/a282ff605ed04d2c8ab73ed2bd1ceb7e 2024-12-10T03:34:01,668 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c5ab85cfefe44e75a58f6282da18c665 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/c5ab85cfefe44e75a58f6282da18c665 2024-12-10T03:34:01,669 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/3ad6ac936e8945c09218589c91c3606e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/3ad6ac936e8945c09218589c91c3606e 2024-12-10T03:34:01,669 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1b5d1327e0a94dff91308c91d0a3b906 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1b5d1327e0a94dff91308c91d0a3b906 2024-12-10T03:34:01,669 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/775c7baad6d84011a4e6fdc52228d613 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/775c7baad6d84011a4e6fdc52228d613 2024-12-10T03:34:01,669 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/722d9e8dc9c04b7fb0b47b0797f42dce to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/722d9e8dc9c04b7fb0b47b0797f42dce 2024-12-10T03:34:01,669 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/34ebc19078b146e68f76f35dece0498f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/34ebc19078b146e68f76f35dece0498f 2024-12-10T03:34:01,670 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/056b7fc6f0314ea2bab85e4f97bd6143 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/056b7fc6f0314ea2bab85e4f97bd6143 2024-12-10T03:34:01,670 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e2e5f1245a3540dabb8799aca9c59d2c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/e2e5f1245a3540dabb8799aca9c59d2c 2024-12-10T03:34:01,671 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5c4bb1635b9f403887973cf2eff6c180 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5c4bb1635b9f403887973cf2eff6c180 2024-12-10T03:34:01,671 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/64cd130bf3c24911bc506e4bdf85d77e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/64cd130bf3c24911bc506e4bdf85d77e 2024-12-10T03:34:01,671 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/387bd393d5244887a9689dbc9401e1fe to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/387bd393d5244887a9689dbc9401e1fe 2024-12-10T03:34:01,672 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d272460a66a940da9adfd62ddf936a67 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d272460a66a940da9adfd62ddf936a67 2024-12-10T03:34:01,672 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7145bd37bc254c47956c9043011891a9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7145bd37bc254c47956c9043011891a9 2024-12-10T03:34:01,672 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5d074dd8bebd4af4bd9b67e15eef415b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/5d074dd8bebd4af4bd9b67e15eef415b 2024-12-10T03:34:01,672 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4ac36cb2093948eabe926734fac0ff18 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/4ac36cb2093948eabe926734fac0ff18 2024-12-10T03:34:01,672 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/24c1ca6663f144dbab5e1a9dec77d8ab to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/24c1ca6663f144dbab5e1a9dec77d8ab 2024-12-10T03:34:01,672 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1c1e93bff1e14032a81c606559b1d9de to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/1c1e93bff1e14032a81c606559b1d9de 2024-12-10T03:34:01,673 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/ea7e2ffbcde84d1eacffe580c06d4b86 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/ea7e2ffbcde84d1eacffe580c06d4b86 2024-12-10T03:34:01,675 DEBUG [StoreCloser-TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2c3f7fb454644d62aad8c64e38755144, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0e215f817d0941638726e113e9c2e506, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a257cffc46404edf883f4df647410df6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ffa5c48975840fc97d3005b2739bd93, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/10145fd8dfa54a8193c61307dff51db6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5982bfb2978d419e8cc20e13ba4779d4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2542fff02c3e499293e5671c2f899de7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/92805d685a9b462da6138665c5eb52da, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/ecfaad489a31499dba090e247cd2a1e8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/893546d4f53849488c0a3b16058d1b38, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0d56c109a7884bfcb5277722873e444a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/16e651a03db645dd8c827759bfd739c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5da0795fc09a4e18b68a8e1551938e64, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/469a63abb87d46f99532150e14585831, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d3e53045bf354771beb305327db6e216, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05454fc72e9142d9a78d9c04db257b98, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/25a397c978aa49d9bb90eb49e37afb43, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f0f90c1e65664fcbb6f81b0021f77e1b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/402bb7d762e14ce99ee106d0f12c1f52, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d9d30c92bc364b668108f1657a39d0b5, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/087f98bd903342d0b0ac9dcabd9c5140, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/6a95dc5af51645ff988b92303c4fda31, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05bbe9e2af2c4c09ab904c20def86b09, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7347a6a0954f4e22a706cf68a1b27cd8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2b506dedf7274215bc17a30a12fa91a8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/1ee4f10d20c74e3b823d6d79bce0e6fc, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/300427e56dc54ca6b18022b8a33226f5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a3c3cb181975485da79cc8576284e82b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d7b9f5ed3bbd497599087305d589ffc9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c3d08f652dd64e749fcfc27949f49d29, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/b19713258592463c95c9f6cc184cc890, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/11b8f0e07c0f44e19c07abffc5a098c0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f5f46b1715594cd496dd071bca5bc255, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c7bd196f3a174b22bb873669b2682ad5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a1125882790344868201783b7dd8774e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/710aa0ea16754e968d5203ca31a9314a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5136547df5b4441f8e093bb09e3ffc99, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5a1b829f95f441299f5a4b3dc2e36a45, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/834d71468662469c8886d7b1a74684fa, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/425a75fc10b24ade820e946e5a9896f5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/21b724172cfd4ab3b720f993d355928e] to archive 2024-12-10T03:34:01,676 DEBUG [StoreCloser-TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:34:01,678 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0e215f817d0941638726e113e9c2e506 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0e215f817d0941638726e113e9c2e506 2024-12-10T03:34:01,678 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/10145fd8dfa54a8193c61307dff51db6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/10145fd8dfa54a8193c61307dff51db6 2024-12-10T03:34:01,678 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5982bfb2978d419e8cc20e13ba4779d4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5982bfb2978d419e8cc20e13ba4779d4 2024-12-10T03:34:01,678 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/92805d685a9b462da6138665c5eb52da to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/92805d685a9b462da6138665c5eb52da 2024-12-10T03:34:01,679 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a257cffc46404edf883f4df647410df6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a257cffc46404edf883f4df647410df6 2024-12-10T03:34:01,679 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2c3f7fb454644d62aad8c64e38755144 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2c3f7fb454644d62aad8c64e38755144 2024-12-10T03:34:01,679 DEBUG [HFileArchiver-7 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ffa5c48975840fc97d3005b2739bd93 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ffa5c48975840fc97d3005b2739bd93 2024-12-10T03:34:01,679 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2542fff02c3e499293e5671c2f899de7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2542fff02c3e499293e5671c2f899de7 2024-12-10T03:34:01,680 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/ecfaad489a31499dba090e247cd2a1e8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/ecfaad489a31499dba090e247cd2a1e8 2024-12-10T03:34:01,682 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0d56c109a7884bfcb5277722873e444a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/0d56c109a7884bfcb5277722873e444a 2024-12-10T03:34:01,682 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/16e651a03db645dd8c827759bfd739c5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/16e651a03db645dd8c827759bfd739c5 2024-12-10T03:34:01,682 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/469a63abb87d46f99532150e14585831 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/469a63abb87d46f99532150e14585831 2024-12-10T03:34:01,683 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d3e53045bf354771beb305327db6e216 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d3e53045bf354771beb305327db6e216 2024-12-10T03:34:01,684 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05454fc72e9142d9a78d9c04db257b98 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05454fc72e9142d9a78d9c04db257b98 2024-12-10T03:34:01,684 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/893546d4f53849488c0a3b16058d1b38 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/893546d4f53849488c0a3b16058d1b38 2024-12-10T03:34:01,684 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5da0795fc09a4e18b68a8e1551938e64 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5da0795fc09a4e18b68a8e1551938e64 2024-12-10T03:34:01,685 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/25a397c978aa49d9bb90eb49e37afb43 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/25a397c978aa49d9bb90eb49e37afb43 2024-12-10T03:34:01,685 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f0f90c1e65664fcbb6f81b0021f77e1b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f0f90c1e65664fcbb6f81b0021f77e1b 2024-12-10T03:34:01,685 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/402bb7d762e14ce99ee106d0f12c1f52 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/402bb7d762e14ce99ee106d0f12c1f52 2024-12-10T03:34:01,686 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d9d30c92bc364b668108f1657a39d0b5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d9d30c92bc364b668108f1657a39d0b5 2024-12-10T03:34:01,686 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/087f98bd903342d0b0ac9dcabd9c5140 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/087f98bd903342d0b0ac9dcabd9c5140 2024-12-10T03:34:01,686 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05bbe9e2af2c4c09ab904c20def86b09 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/05bbe9e2af2c4c09ab904c20def86b09 2024-12-10T03:34:01,687 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/1ee4f10d20c74e3b823d6d79bce0e6fc to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/1ee4f10d20c74e3b823d6d79bce0e6fc 2024-12-10T03:34:01,687 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2b506dedf7274215bc17a30a12fa91a8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/2b506dedf7274215bc17a30a12fa91a8 2024-12-10T03:34:01,687 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7347a6a0954f4e22a706cf68a1b27cd8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7347a6a0954f4e22a706cf68a1b27cd8 2024-12-10T03:34:01,687 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/300427e56dc54ca6b18022b8a33226f5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/300427e56dc54ca6b18022b8a33226f5 2024-12-10T03:34:01,687 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a3c3cb181975485da79cc8576284e82b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a3c3cb181975485da79cc8576284e82b 2024-12-10T03:34:01,688 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d7b9f5ed3bbd497599087305d589ffc9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/d7b9f5ed3bbd497599087305d589ffc9 2024-12-10T03:34:01,689 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/b19713258592463c95c9f6cc184cc890 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/b19713258592463c95c9f6cc184cc890 2024-12-10T03:34:01,689 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c3d08f652dd64e749fcfc27949f49d29 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c3d08f652dd64e749fcfc27949f49d29 2024-12-10T03:34:01,689 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/11b8f0e07c0f44e19c07abffc5a098c0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/11b8f0e07c0f44e19c07abffc5a098c0 2024-12-10T03:34:01,689 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/6a95dc5af51645ff988b92303c4fda31 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/6a95dc5af51645ff988b92303c4fda31 2024-12-10T03:34:01,690 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f5f46b1715594cd496dd071bca5bc255 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/f5f46b1715594cd496dd071bca5bc255 2024-12-10T03:34:01,690 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c7bd196f3a174b22bb873669b2682ad5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/c7bd196f3a174b22bb873669b2682ad5 2024-12-10T03:34:01,690 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a1125882790344868201783b7dd8774e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a1125882790344868201783b7dd8774e 2024-12-10T03:34:01,691 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/710aa0ea16754e968d5203ca31a9314a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/710aa0ea16754e968d5203ca31a9314a 2024-12-10T03:34:01,691 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5136547df5b4441f8e093bb09e3ffc99 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5136547df5b4441f8e093bb09e3ffc99 2024-12-10T03:34:01,694 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/21b724172cfd4ab3b720f993d355928e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/21b724172cfd4ab3b720f993d355928e 2024-12-10T03:34:01,694 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/425a75fc10b24ade820e946e5a9896f5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/425a75fc10b24ade820e946e5a9896f5 2024-12-10T03:34:01,694 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/834d71468662469c8886d7b1a74684fa to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/834d71468662469c8886d7b1a74684fa 2024-12-10T03:34:01,694 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5a1b829f95f441299f5a4b3dc2e36a45 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/5a1b829f95f441299f5a4b3dc2e36a45 2024-12-10T03:34:01,699 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/recovered.edits/615.seqid, newMaxSeqId=615, maxSeqId=1 2024-12-10T03:34:01,702 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d. 2024-12-10T03:34:01,702 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for c1d70420fe32ca56286cb5d739cf351d: 2024-12-10T03:34:01,704 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:34:01,705 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=c1d70420fe32ca56286cb5d739cf351d, regionState=CLOSED 2024-12-10T03:34:01,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-10T03:34:01,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure c1d70420fe32ca56286cb5d739cf351d, server=50b9ef1c5472,37553,1733801610862 in 1.5380 sec 2024-12-10T03:34:01,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-12-10T03:34:01,708 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c1d70420fe32ca56286cb5d739cf351d, UNASSIGN in 1.5420 sec 2024-12-10T03:34:01,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-10T03:34:01,710 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5470 sec 2024-12-10T03:34:01,711 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801641711"}]},"ts":"1733801641711"} 2024-12-10T03:34:01,712 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T03:34:01,752 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T03:34:01,754 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6240 sec 2024-12-10T03:34:02,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-10T03:34:02,244 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-10T03:34:02,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T03:34:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:02,257 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:02,258 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:02,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-10T03:34:02,261 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:34:02,266 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/recovered.edits] 2024-12-10T03:34:02,270 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6dae3a367aca4c4ba544a1551c5a583a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/6dae3a367aca4c4ba544a1551c5a583a 2024-12-10T03:34:02,270 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/e13c54bf3a574853908351845b1cfd24 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/e13c54bf3a574853908351845b1cfd24 2024-12-10T03:34:02,270 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f1bdbabaeec2406babdab8937721169d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/A/f1bdbabaeec2406babdab8937721169d 2024-12-10T03:34:02,273 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/2db5e7e2f37d422eb71f735155ef0cce to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/2db5e7e2f37d422eb71f735155ef0cce 2024-12-10T03:34:02,274 DEBUG [HFileArchiver-6 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7592a8c0000648e0a6235e8f5fa07e66 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/7592a8c0000648e0a6235e8f5fa07e66 2024-12-10T03:34:02,274 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d53e8cd3d9034babb2ce8183318b387a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/B/d53e8cd3d9034babb2ce8183318b387a 2024-12-10T03:34:02,277 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/04d0da7d7f724fd4bec9c9b937f00e58 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/04d0da7d7f724fd4bec9c9b937f00e58 2024-12-10T03:34:02,277 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ac3712a3bea4c77a30b5cfa65b6f743 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/7ac3712a3bea4c77a30b5cfa65b6f743 2024-12-10T03:34:02,277 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a8b46ac921c043dea38411f91e31e400 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/C/a8b46ac921c043dea38411f91e31e400 2024-12-10T03:34:02,280 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/recovered.edits/615.seqid to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d/recovered.edits/615.seqid 2024-12-10T03:34:02,280 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/c1d70420fe32ca56286cb5d739cf351d 2024-12-10T03:34:02,280 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T03:34:02,285 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:02,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] util.ReflectedFunctionCache(97): Populated cache 
for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-10T03:34:02,292 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T03:34:02,318 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T03:34:02,319 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:02,319 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T03:34:02,320 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733801642319"}]},"ts":"9223372036854775807"} 2024-12-10T03:34:02,323 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T03:34:02,323 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c1d70420fe32ca56286cb5d739cf351d, NAME => 'TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T03:34:02,323 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T03:34:02,323 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733801642323"}]},"ts":"9223372036854775807"} 2024-12-10T03:34:02,326 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T03:34:02,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-10T03:34:02,361 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:02,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 109 msec 2024-12-10T03:34:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-10T03:34:02,562 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-12-10T03:34:02,579 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=247 (was 219) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-627809423_22 at /127.0.0.1:40310 [Waiting for operation #399] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1054130048_22 at /127.0.0.1:33406 [Waiting for operation #402] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2dd385a3-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2dd385a3-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1054130048_22 at /127.0.0.1:39404 [Waiting for operation #27] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2dd385a3-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2dd385a3-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;50b9ef1c5472:37553-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=211 (was 98) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3480 (was 4013) 2024-12-10T03:34:02,588 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=247, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=211, ProcessCount=11, AvailableMemoryMB=3480 2024-12-10T03:34:02,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T03:34:02,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:34:02,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:02,591 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T03:34:02,592 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:02,592 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-12-10T03:34:02,592 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T03:34:02,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T03:34:02,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741971_1147 (size=963) 2024-12-10T03:34:02,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T03:34:02,896 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T03:34:03,003 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:34:03,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741972_1148 (size=53) 2024-12-10T03:34:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T03:34:03,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:03,416 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d58f0e0e176fadae27aa99803befaf76, disabling compactions & flushes 2024-12-10T03:34:03,416 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:03,417 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:03,417 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. after waiting 0 ms 2024-12-10T03:34:03,417 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:03,417 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:03,417 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:03,419 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T03:34:03,419 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733801643419"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733801643419"}]},"ts":"1733801643419"} 2024-12-10T03:34:03,422 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T03:34:03,423 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T03:34:03,424 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801643423"}]},"ts":"1733801643423"} 2024-12-10T03:34:03,425 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T03:34:03,442 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T03:34:03,445 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T03:34:03,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, ASSIGN}] 2024-12-10T03:34:03,470 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, ASSIGN 2024-12-10T03:34:03,470 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, ASSIGN; state=OFFLINE, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=false 2024-12-10T03:34:03,621 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:03,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:03,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T03:34:03,776 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:03,784 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:03,785 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:34:03,786 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,786 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:03,786 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,786 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,789 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,791 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:03,791 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d58f0e0e176fadae27aa99803befaf76 columnFamilyName A 2024-12-10T03:34:03,791 DEBUG [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:03,792 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(327): Store=d58f0e0e176fadae27aa99803befaf76/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-12-10T03:34:03,792 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,793 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:03,794 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d58f0e0e176fadae27aa99803befaf76 columnFamilyName B 2024-12-10T03:34:03,794 DEBUG [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:03,795 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(327): Store=d58f0e0e176fadae27aa99803befaf76/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:03,795 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,796 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:03,796 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d58f0e0e176fadae27aa99803befaf76 columnFamilyName C 2024-12-10T03:34:03,796 DEBUG [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:03,797 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(327): Store=d58f0e0e176fadae27aa99803befaf76/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:03,797 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:03,798 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,798 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,800 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:34:03,801 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:03,804 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:34:03,804 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened d58f0e0e176fadae27aa99803befaf76; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72700862, jitterRate=0.0833272635936737}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:34:03,805 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:03,806 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., pid=39, masterSystemTime=1733801643776 2024-12-10T03:34:03,807 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:03,807 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:03,808 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:03,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-10T03:34:03,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 in 186 msec 2024-12-10T03:34:03,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-10T03:34:03,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, ASSIGN in 341 msec 2024-12-10T03:34:03,813 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T03:34:03,813 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801643813"}]},"ts":"1733801643813"} 2024-12-10T03:34:03,814 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T03:34:03,845 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T03:34:03,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2550 sec 2024-12-10T03:34:04,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T03:34:04,702 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-12-10T03:34:04,706 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x407e6b5c to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6eb305fc 2024-12-10T03:34:04,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@245d85d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:04,749 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:04,753 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:04,757 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T03:34:04,758 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59484, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T03:34:04,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T03:34:04,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:34:04,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:04,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741973_1149 (size=999) 2024-12-10T03:34:05,182 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T03:34:05,182 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T03:34:05,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:34:05,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, REOPEN/MOVE}] 2024-12-10T03:34:05,202 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, REOPEN/MOVE 2024-12-10T03:34:05,202 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,203 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:34:05,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:05,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,357 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,357 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:34:05,357 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing d58f0e0e176fadae27aa99803befaf76, disabling compactions & flushes 2024-12-10T03:34:05,357 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:05,357 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:05,358 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. after waiting 0 ms 2024-12-10T03:34:05,358 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:05,391 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T03:34:05,393 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:05,393 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:05,393 WARN [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: d58f0e0e176fadae27aa99803befaf76 to self. 2024-12-10T03:34:05,396 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,397 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=CLOSED 2024-12-10T03:34:05,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-10T03:34:05,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 in 196 msec 2024-12-10T03:34:05,404 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, REOPEN/MOVE; state=CLOSED, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=true 2024-12-10T03:34:05,554 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:05,708 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,713 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:05,713 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:34:05,714 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,715 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:05,715 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,715 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,721 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,722 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:05,729 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d58f0e0e176fadae27aa99803befaf76 columnFamilyName A 2024-12-10T03:34:05,731 DEBUG [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:05,732 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(327): Store=d58f0e0e176fadae27aa99803befaf76/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:05,732 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,733 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:05,733 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d58f0e0e176fadae27aa99803befaf76 columnFamilyName B 2024-12-10T03:34:05,733 DEBUG [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:05,734 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(327): Store=d58f0e0e176fadae27aa99803befaf76/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:05,734 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,734 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:05,734 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d58f0e0e176fadae27aa99803befaf76 columnFamilyName C 2024-12-10T03:34:05,734 DEBUG [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:05,735 INFO [StoreOpener-d58f0e0e176fadae27aa99803befaf76-1 {}] regionserver.HStore(327): Store=d58f0e0e176fadae27aa99803befaf76/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:05,735 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:05,736 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,737 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,738 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:34:05,739 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,740 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened d58f0e0e176fadae27aa99803befaf76; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74179534, jitterRate=0.10536119341850281}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:34:05,742 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:05,742 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., pid=44, masterSystemTime=1733801645707 2024-12-10T03:34:05,744 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:05,744 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:05,744 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=OPEN, openSeqNum=5, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,746 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-12-10T03:34:05,746 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 in 189 msec 2024-12-10T03:34:05,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-10T03:34:05,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, REOPEN/MOVE in 545 msec 2024-12-10T03:34:05,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-10T03:34:05,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 557 msec 2024-12-10T03:34:05,752 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 986 msec 2024-12-10T03:34:05,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-10T03:34:05,760 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b4bd1ba to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@491ea2ee 2024-12-10T03:34:05,792 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328f994d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:05,793 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebda6ad to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b44b1e5 2024-12-10T03:34:05,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a9306be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:05,804 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x505d5ccd to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46114993 2024-12-10T03:34:05,811 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:05,812 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 
127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9
2024-12-10T03:34:05,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@247c0c93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-10T03:34:05,821 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78cafade to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@152377d4
2024-12-10T03:34:05,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517ff977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-10T03:34:05,829 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233
2024-12-10T03:34:05,836 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-10T03:34:05,837 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b
2024-12-10T03:34:05,844 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-10T03:34:05,845 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7af61386 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a7e1dd
2024-12-10T03:34:05,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630684bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-10T03:34:05,854 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x063e87c8 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31a027db
2024-12-10T03:34:05,861 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66547e2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-10T03:34:05,866 INFO
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-10T03:34:05,866 DEBUG [hconnection-0x78b8fca0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-10T03:34:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees
2024-12-10T03:34:05,867 DEBUG [hconnection-0x57c53fe4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-10T03:34:05,868 DEBUG [hconnection-0x3f96faad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-10T03:34:05,868 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T03:34:05,869 DEBUG [hconnection-0x420eadaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-10T03:34:05,869 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60880, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-10T03:34:05,869 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-10T03:34:05,870 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T03:34:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-10T03:34:05,870 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T03:34:05,870 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-10T03:34:05,871 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-10T03:34:05,872 DEBUG [hconnection-0x6ebb124e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-10T03:34:05,872 DEBUG [hconnection-0x3627e90e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-10T03:34:05,872 DEBUG [hconnection-0x13563c7b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-10T03:34:05,873 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE),
service=ClientService 2024-12-10T03:34:05,873 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60920, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:05,874 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60944, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:05,876 DEBUG [hconnection-0x3afb7273-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:05,878 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:05,879 DEBUG [hconnection-0x2663b309-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:05,880 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:05,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:34:05,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:05,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:05,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:05,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:05,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:05,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:05,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:05,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801705923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:05,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801705926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801705926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121063f104ff77ae4e6aac64acf91f0ce647_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801645881/Put/seqid=0 2024-12-10T03:34:05,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:05,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801705928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801705930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:05,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741974_1150 (size=12154) 2024-12-10T03:34:05,953 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:05,958 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121063f104ff77ae4e6aac64acf91f0ce647_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121063f104ff77ae4e6aac64acf91f0ce647_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:05,959 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/22e81ff7afd84e82928262693d9ec126, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:05,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/22e81ff7afd84e82928262693d9ec126 is 175, key is test_row_0/A:col10/1733801645881/Put/seqid=0 2024-12-10T03:34:05,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T03:34:05,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741975_1151 (size=30955) 2024-12-10T03:34:05,981 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/22e81ff7afd84e82928262693d9ec126 2024-12-10T03:34:06,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/68c53adfa9494dafb33a3adf1dabbd7a is 50, key is test_row_0/B:col10/1733801645881/Put/seqid=0 2024-12-10T03:34:06,026 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:06,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:06,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801706034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801706034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801706035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801706035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,039 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801706038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741976_1152 (size=12001) 2024-12-10T03:34:06,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/68c53adfa9494dafb33a3adf1dabbd7a 2024-12-10T03:34:06,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/aad636305f5b4e988d532e5476ab77d3 is 50, key is test_row_0/C:col10/1733801645881/Put/seqid=0 2024-12-10T03:34:06,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741977_1153 (size=12001) 2024-12-10T03:34:06,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T03:34:06,179 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:06,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:06,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801706237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801706237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801706238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801706238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801706242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,331 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T03:34:06,485 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:06,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:06,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/aad636305f5b4e988d532e5476ab77d3 2024-12-10T03:34:06,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/22e81ff7afd84e82928262693d9ec126 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/22e81ff7afd84e82928262693d9ec126 2024-12-10T03:34:06,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/22e81ff7afd84e82928262693d9ec126, entries=150, sequenceid=15, filesize=30.2 K 2024-12-10T03:34:06,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/68c53adfa9494dafb33a3adf1dabbd7a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/68c53adfa9494dafb33a3adf1dabbd7a 2024-12-10T03:34:06,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/68c53adfa9494dafb33a3adf1dabbd7a, entries=150, sequenceid=15, 
filesize=11.7 K 2024-12-10T03:34:06,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/aad636305f5b4e988d532e5476ab77d3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/aad636305f5b4e988d532e5476ab77d3 2024-12-10T03:34:06,536 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/aad636305f5b4e988d532e5476ab77d3, entries=150, sequenceid=15, filesize=11.7 K 2024-12-10T03:34:06,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for d58f0e0e176fadae27aa99803befaf76 in 650ms, sequenceid=15, compaction requested=false 2024-12-10T03:34:06,537 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-10T03:34:06,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:06,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:06,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T03:34:06,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:06,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:06,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:06,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:06,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:06,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:06,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801706555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801706559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801706561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801706561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801706561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210774bed0ae2be40ba9602047c07af10e7_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801646547/Put/seqid=0 2024-12-10T03:34:06,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741978_1154 (size=14594) 2024-12-10T03:34:06,637 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:06,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:06,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:06,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801706663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801706663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801706665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801706665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801706665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,791 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:06,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:06,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801706865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801706865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801706868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801706868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:06,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801706868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,944 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:06,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:06,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:06,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:06,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:06,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T03:34:07,002 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:07,006 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210774bed0ae2be40ba9602047c07af10e7_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210774bed0ae2be40ba9602047c07af10e7_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:07,007 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/8103c729a4424b478e8c4aec871bb11c, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:07,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/8103c729a4424b478e8c4aec871bb11c is 175, key is test_row_0/A:col10/1733801646547/Put/seqid=0 2024-12-10T03:34:07,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741979_1155 (size=39549) 2024-12-10T03:34:07,013 INFO [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/8103c729a4424b478e8c4aec871bb11c 2024-12-10T03:34:07,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/b22fa2cf6127486d8e3ae14a277143a6 is 50, key is test_row_0/B:col10/1733801646547/Put/seqid=0 2024-12-10T03:34:07,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741980_1156 (size=12001) 2024-12-10T03:34:07,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/b22fa2cf6127486d8e3ae14a277143a6 2024-12-10T03:34:07,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/176a0ad48a9247d2a9a7b242c766169d is 50, key is test_row_0/C:col10/1733801646547/Put/seqid=0 2024-12-10T03:34:07,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741981_1157 (size=12001) 2024-12-10T03:34:07,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/176a0ad48a9247d2a9a7b242c766169d 2024-12-10T03:34:07,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/8103c729a4424b478e8c4aec871bb11c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/8103c729a4424b478e8c4aec871bb11c 2024-12-10T03:34:07,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/8103c729a4424b478e8c4aec871bb11c, entries=200, sequenceid=42, filesize=38.6 K 2024-12-10T03:34:07,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/b22fa2cf6127486d8e3ae14a277143a6 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/b22fa2cf6127486d8e3ae14a277143a6 2024-12-10T03:34:07,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/b22fa2cf6127486d8e3ae14a277143a6, entries=150, sequenceid=42, filesize=11.7 K 2024-12-10T03:34:07,097 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/176a0ad48a9247d2a9a7b242c766169d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/176a0ad48a9247d2a9a7b242c766169d 2024-12-10T03:34:07,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:07,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:07,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,098 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
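The cycle above is a write-pressure stall: region d58f0e0e176fadae27aa99803befaf76 has exceeded its 512.0 K memstore blocking limit, so HRegion.checkResources rejects each incoming Mutate with RegionTooBusyException, while the master's remote flush procedure (pid=46) keeps failing with "Unable to complete flush ... as already flushing" until the in-flight flush drains the memstore. The Java sketch below is a minimal, hypothetical client-side back-off loop for writes rejected this way; the table, family, and retry parameters are illustrative assumptions only, and the stock HBase client normally retries such transient rejections internally before they ever reach application code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical writer that backs off when a put is rejected while the region is over its memstore limit. */
public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);           // rejected while the memstore is over its blocking limit
          break;
        } catch (IOException e) {   // the RegionTooBusyException typically surfaces wrapped in an IOException
          if (attempt >= 5) {
            throw e;                // give up after a few attempts
          }
          Thread.sleep(backoffMs);  // give the in-flight flush time to make progress
          backoffMs *= 2;           // simple exponential back-off
        }
      }
    }
  }
}

Backing off instead of resubmitting immediately gives MemStoreFlusher.0 time to finish the flush that the log shows completing shortly afterwards.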
2024-12-10T03:34:07,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/176a0ad48a9247d2a9a7b242c766169d, entries=150, sequenceid=42, filesize=11.7 K
2024-12-10T03:34:07,107 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=53.67 KB/54960 for d58f0e0e176fadae27aa99803befaf76 in 558ms, sequenceid=42, compaction requested=false
2024-12-10T03:34:07,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76:
2024-12-10T03:34:07,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76
2024-12-10T03:34:07,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-10T03:34:07,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A
2024-12-10T03:34:07,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T03:34:07,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B
2024-12-10T03:34:07,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T03:34:07,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C
2024-12-10T03:34:07,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T03:34:07,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ec7cf643d3a3486c97540129e2fe7bff_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801647171/Put/seqid=0
2024-12-10T03:34:07,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741982_1158 (size=12154)
2024-12-10T03:34:07,194 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T03:34:07,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801707196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801707194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801707197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801707198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801707197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,207 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ec7cf643d3a3486c97540129e2fe7bff_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ec7cf643d3a3486c97540129e2fe7bff_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:07,210 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/42fb424984134adc9d8a49a2d2069c19, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:07,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/42fb424984134adc9d8a49a2d2069c19 is 175, key is test_row_0/A:col10/1733801647171/Put/seqid=0 2024-12-10T03:34:07,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741983_1159 (size=30955) 2024-12-10T03:34:07,242 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T03:34:07,250 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,251 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:07,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:07,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
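Every one of these rejections quotes the same threshold, "Over memstore limit=512.0 K". In HBase that blocking limit is the region flush size multiplied by hbase.hregion.memstore.block.multiplier, so the small 512 KB figure suggests the test runs with a deliberately tiny flush size to force back-to-back flushes. The configuration sketch below shows one combination that would yield that limit; the concrete values are assumptions for illustration, not read from the test's source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/** Hypothetical settings that would produce a 512 KB memstore blocking limit (128 KB flush size x 4). */
public class SmallMemStoreConfSketch {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Ask for a flush once a region's memstore reaches 128 KB ...
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // ... and reject writes (RegionTooBusyException) once it reaches 4x that, i.e. 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}

With a limit that low, bursts of the 4.7 K Mutate calls seen in this log can overrun the memstore faster than a single flush can drain it, which is why the same exception repeats across handlers and client connections.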
2024-12-10T03:34:07,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801707301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801707301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801707302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801707303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801707303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,405 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:07,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:07,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,405 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801707504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801707505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801707505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801707507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801707507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,557 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:07,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:07,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:07,622 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/42fb424984134adc9d8a49a2d2069c19 2024-12-10T03:34:07,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/95300adea00643129f8383f91d736bed is 50, key is test_row_0/B:col10/1733801647171/Put/seqid=0 2024-12-10T03:34:07,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741984_1160 (size=12001) 2024-12-10T03:34:07,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/95300adea00643129f8383f91d736bed 2024-12-10T03:34:07,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/4e0fec1a08a642aeb6ee899562606fad is 50, key is test_row_0/C:col10/1733801647171/Put/seqid=0 2024-12-10T03:34:07,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741985_1161 (size=12001) 2024-12-10T03:34:07,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=55 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/4e0fec1a08a642aeb6ee899562606fad 2024-12-10T03:34:07,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/42fb424984134adc9d8a49a2d2069c19 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/42fb424984134adc9d8a49a2d2069c19 2024-12-10T03:34:07,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/42fb424984134adc9d8a49a2d2069c19, entries=150, sequenceid=55, filesize=30.2 K 2024-12-10T03:34:07,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/95300adea00643129f8383f91d736bed as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/95300adea00643129f8383f91d736bed 2024-12-10T03:34:07,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/95300adea00643129f8383f91d736bed, entries=150, sequenceid=55, filesize=11.7 K 2024-12-10T03:34:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/4e0fec1a08a642aeb6ee899562606fad as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4e0fec1a08a642aeb6ee899562606fad 2024-12-10T03:34:07,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4e0fec1a08a642aeb6ee899562606fad, entries=150, sequenceid=55, filesize=11.7 K 2024-12-10T03:34:07,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d58f0e0e176fadae27aa99803befaf76 in 520ms, sequenceid=55, compaction requested=true 2024-12-10T03:34:07,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:07,692 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:07,692 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:07,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:07,693 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:07,693 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:07,693 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/B is initiating minor compaction (all files) 2024-12-10T03:34:07,693 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/A is initiating minor compaction (all files) 2024-12-10T03:34:07,693 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/B in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,693 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/A in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:07,693 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/22e81ff7afd84e82928262693d9ec126, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/8103c729a4424b478e8c4aec871bb11c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/42fb424984134adc9d8a49a2d2069c19] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=99.1 K 2024-12-10T03:34:07,693 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/68c53adfa9494dafb33a3adf1dabbd7a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/b22fa2cf6127486d8e3ae14a277143a6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/95300adea00643129f8383f91d736bed] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.2 K 2024-12-10T03:34:07,693 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:07,693 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/22e81ff7afd84e82928262693d9ec126, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/8103c729a4424b478e8c4aec871bb11c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/42fb424984134adc9d8a49a2d2069c19] 2024-12-10T03:34:07,693 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 68c53adfa9494dafb33a3adf1dabbd7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733801645875 2024-12-10T03:34:07,694 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22e81ff7afd84e82928262693d9ec126, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733801645875 2024-12-10T03:34:07,694 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting b22fa2cf6127486d8e3ae14a277143a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733801645925 2024-12-10T03:34:07,694 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8103c729a4424b478e8c4aec871bb11c, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733801645913 2024-12-10T03:34:07,694 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 95300adea00643129f8383f91d736bed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733801646553 2024-12-10T03:34:07,694 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42fb424984134adc9d8a49a2d2069c19, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733801646553 2024-12-10T03:34:07,708 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:07,710 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,712 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412105502d2d99dd04d3eafa1332a706723dd_d58f0e0e176fadae27aa99803befaf76 store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:07,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T03:34:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:07,716 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T03:34:07,716 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#B#compaction#142 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:07,716 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/44f82d9b0a8a48abbdb7d95ac205a2ec is 50, key is test_row_0/B:col10/1733801647171/Put/seqid=0 2024-12-10T03:34:07,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:07,722 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412105502d2d99dd04d3eafa1332a706723dd_d58f0e0e176fadae27aa99803befaf76, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:07,722 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412105502d2d99dd04d3eafa1332a706723dd_d58f0e0e176fadae27aa99803befaf76 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:07,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106bfc0a825bec4d09abf6c0d85eab451f_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801647194/Put/seqid=0 2024-12-10T03:34:07,760 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741986_1162 (size=12104) 2024-12-10T03:34:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741987_1163 (size=4469) 2024-12-10T03:34:07,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741988_1164 (size=12154) 2024-12-10T03:34:07,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:07,776 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106bfc0a825bec4d09abf6c0d85eab451f_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106bfc0a825bec4d09abf6c0d85eab451f_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:07,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bccb73f10040413397f8de8021a9623a, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:07,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bccb73f10040413397f8de8021a9623a is 175, key is test_row_0/A:col10/1733801647194/Put/seqid=0 2024-12-10T03:34:07,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741989_1165 (size=30955) 2024-12-10T03:34:07,786 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bccb73f10040413397f8de8021a9623a 2024-12-10T03:34:07,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/8e160474cf104fb997a02ae83e1cbb2e is 50, key is test_row_0/B:col10/1733801647194/Put/seqid=0 2024-12-10T03:34:07,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741990_1166 (size=12001) 2024-12-10T03:34:07,810 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:07,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:07,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801707814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801707816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801707818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801707819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801707819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801707920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801707921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801707923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801707924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:07,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801707924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:07,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T03:34:08,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801708123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801708124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801708128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801708128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801708130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,165 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/44f82d9b0a8a48abbdb7d95ac205a2ec as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/44f82d9b0a8a48abbdb7d95ac205a2ec 2024-12-10T03:34:08,166 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#A#compaction#141 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:08,168 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/e68b7f4de9154691b085807e7f5fc16b is 175, key is test_row_0/A:col10/1733801647171/Put/seqid=0 2024-12-10T03:34:08,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741991_1167 (size=31058) 2024-12-10T03:34:08,175 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/B of d58f0e0e176fadae27aa99803befaf76 into 44f82d9b0a8a48abbdb7d95ac205a2ec(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:08,175 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:08,175 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/B, priority=13, startTime=1733801647692; duration=0sec 2024-12-10T03:34:08,175 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:08,175 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:B 2024-12-10T03:34:08,175 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:08,178 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:08,178 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/C is initiating minor compaction (all files) 2024-12-10T03:34:08,178 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/C in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:08,178 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/aad636305f5b4e988d532e5476ab77d3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/176a0ad48a9247d2a9a7b242c766169d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4e0fec1a08a642aeb6ee899562606fad] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.2 K 2024-12-10T03:34:08,180 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting aad636305f5b4e988d532e5476ab77d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733801645875 2024-12-10T03:34:08,180 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/e68b7f4de9154691b085807e7f5fc16b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/e68b7f4de9154691b085807e7f5fc16b 2024-12-10T03:34:08,180 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 176a0ad48a9247d2a9a7b242c766169d, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733801645925 2024-12-10T03:34:08,181 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e0fec1a08a642aeb6ee899562606fad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733801646553 2024-12-10T03:34:08,186 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/A of d58f0e0e176fadae27aa99803befaf76 into e68b7f4de9154691b085807e7f5fc16b(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:08,186 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:08,186 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/A, priority=13, startTime=1733801647692; duration=0sec 2024-12-10T03:34:08,186 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:08,186 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:A 2024-12-10T03:34:08,190 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#C#compaction#145 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:08,191 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/7742142f4ae540b5a270b7a7941344e6 is 50, key is test_row_0/C:col10/1733801647171/Put/seqid=0 2024-12-10T03:34:08,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741992_1168 (size=12104) 2024-12-10T03:34:08,202 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/8e160474cf104fb997a02ae83e1cbb2e 2024-12-10T03:34:08,209 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/7742142f4ae540b5a270b7a7941344e6 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/7742142f4ae540b5a270b7a7941344e6 2024-12-10T03:34:08,215 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/C of d58f0e0e176fadae27aa99803befaf76 into 7742142f4ae540b5a270b7a7941344e6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:08,215 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:08,215 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/C, priority=13, startTime=1733801647692; duration=0sec 2024-12-10T03:34:08,215 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:08,215 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:C 2024-12-10T03:34:08,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/fcb2bd33594a458cb755e3eb30b81ab1 is 50, key is test_row_0/C:col10/1733801647194/Put/seqid=0 2024-12-10T03:34:08,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741993_1169 (size=12001) 2024-12-10T03:34:08,223 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/fcb2bd33594a458cb755e3eb30b81ab1 2024-12-10T03:34:08,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bccb73f10040413397f8de8021a9623a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bccb73f10040413397f8de8021a9623a 2024-12-10T03:34:08,231 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bccb73f10040413397f8de8021a9623a, entries=150, sequenceid=79, filesize=30.2 K 2024-12-10T03:34:08,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/8e160474cf104fb997a02ae83e1cbb2e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/8e160474cf104fb997a02ae83e1cbb2e 2024-12-10T03:34:08,237 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/8e160474cf104fb997a02ae83e1cbb2e, entries=150, sequenceid=79, filesize=11.7 K 2024-12-10T03:34:08,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/fcb2bd33594a458cb755e3eb30b81ab1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/fcb2bd33594a458cb755e3eb30b81ab1 2024-12-10T03:34:08,246 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/fcb2bd33594a458cb755e3eb30b81ab1, entries=150, sequenceid=79, filesize=11.7 K 2024-12-10T03:34:08,248 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d58f0e0e176fadae27aa99803befaf76 in 531ms, sequenceid=79, compaction requested=false 2024-12-10T03:34:08,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:08,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:08,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-10T03:34:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-10T03:34:08,253 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-10T03:34:08,253 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3800 sec 2024-12-10T03:34:08,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 2.3880 sec 2024-12-10T03:34:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:08,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:34:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:08,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104c7d47a6604245ecae59d80df27bb75e_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801647817/Put/seqid=0 2024-12-10T03:34:08,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801708454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741994_1170 (size=12154) 2024-12-10T03:34:08,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801708454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801708454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,458 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801708455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801708456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,464 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104c7d47a6604245ecae59d80df27bb75e_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104c7d47a6604245ecae59d80df27bb75e_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:08,465 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/a1e10a1db1bb49d78c058be261c8f21d, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:08,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/a1e10a1db1bb49d78c058be261c8f21d is 175, key is test_row_0/A:col10/1733801647817/Put/seqid=0 2024-12-10T03:34:08,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741995_1171 (size=30955) 2024-12-10T03:34:08,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801708558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801708559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801708561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801708561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801708562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801708760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801708760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801708762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801708763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801708766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:08,875 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/a1e10a1db1bb49d78c058be261c8f21d 2024-12-10T03:34:08,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/2c9e6b7475c042f5bd7dcc46fdf481c8 is 50, key is test_row_0/B:col10/1733801647817/Put/seqid=0 2024-12-10T03:34:08,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741996_1172 (size=12001) 2024-12-10T03:34:09,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801709064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801709068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801709069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801709070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801709070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/2c9e6b7475c042f5bd7dcc46fdf481c8 2024-12-10T03:34:09,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/65511ae70142469ca391c637009b42e9 is 50, key is test_row_0/C:col10/1733801647817/Put/seqid=0 2024-12-10T03:34:09,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741997_1173 (size=12001) 2024-12-10T03:34:09,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801709571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801709571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801709572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801709573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:09,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801709575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:09,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/65511ae70142469ca391c637009b42e9 2024-12-10T03:34:09,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/a1e10a1db1bb49d78c058be261c8f21d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/a1e10a1db1bb49d78c058be261c8f21d 2024-12-10T03:34:09,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/a1e10a1db1bb49d78c058be261c8f21d, entries=150, sequenceid=96, filesize=30.2 K 2024-12-10T03:34:09,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/2c9e6b7475c042f5bd7dcc46fdf481c8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/2c9e6b7475c042f5bd7dcc46fdf481c8 2024-12-10T03:34:09,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/2c9e6b7475c042f5bd7dcc46fdf481c8, entries=150, sequenceid=96, filesize=11.7 K 2024-12-10T03:34:09,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/65511ae70142469ca391c637009b42e9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/65511ae70142469ca391c637009b42e9 2024-12-10T03:34:09,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/65511ae70142469ca391c637009b42e9, entries=150, sequenceid=96, filesize=11.7 K 2024-12-10T03:34:09,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d58f0e0e176fadae27aa99803befaf76 in 1320ms, sequenceid=96, compaction requested=true 2024-12-10T03:34:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:09,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:09,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:09,756 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:09,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:09,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:09,756 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:09,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:09,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:09,757 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:09,757 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:09,757 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/A is initiating minor compaction (all files) 2024-12-10T03:34:09,757 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/B is initiating minor compaction (all files) 2024-12-10T03:34:09,757 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/B in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:09,757 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/A in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:09,757 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/e68b7f4de9154691b085807e7f5fc16b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bccb73f10040413397f8de8021a9623a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/a1e10a1db1bb49d78c058be261c8f21d] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=90.8 K 2024-12-10T03:34:09,757 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/44f82d9b0a8a48abbdb7d95ac205a2ec, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/8e160474cf104fb997a02ae83e1cbb2e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/2c9e6b7475c042f5bd7dcc46fdf481c8] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.3 K 2024-12-10T03:34:09,757 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:09,757 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/e68b7f4de9154691b085807e7f5fc16b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bccb73f10040413397f8de8021a9623a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/a1e10a1db1bb49d78c058be261c8f21d] 2024-12-10T03:34:09,757 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 44f82d9b0a8a48abbdb7d95ac205a2ec, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733801646553 2024-12-10T03:34:09,757 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting e68b7f4de9154691b085807e7f5fc16b, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733801646553 2024-12-10T03:34:09,758 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e160474cf104fb997a02ae83e1cbb2e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733801647194 2024-12-10T03:34:09,758 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting bccb73f10040413397f8de8021a9623a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733801647194 2024-12-10T03:34:09,758 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c9e6b7475c042f5bd7dcc46fdf481c8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733801647817 2024-12-10T03:34:09,758 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1e10a1db1bb49d78c058be261c8f21d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733801647817 2024-12-10T03:34:09,768 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:09,769 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#B#compaction#151 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:09,770 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/f069e0ca24f94456a8e8180ec67f07c5 is 50, key is test_row_0/B:col10/1733801647817/Put/seqid=0 2024-12-10T03:34:09,770 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121030d598b2e1fb4dfb863a5455cbda0496_d58f0e0e176fadae27aa99803befaf76 store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:09,773 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121030d598b2e1fb4dfb863a5455cbda0496_d58f0e0e176fadae27aa99803befaf76, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:09,773 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121030d598b2e1fb4dfb863a5455cbda0496_d58f0e0e176fadae27aa99803befaf76 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:09,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741998_1174 (size=12207) 2024-12-10T03:34:09,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741999_1175 (size=4469) 2024-12-10T03:34:09,794 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#A#compaction#150 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:09,795 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b7a87f0021f241c8aada33ee667ae252 is 175, key is test_row_0/A:col10/1733801647817/Put/seqid=0 2024-12-10T03:34:09,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742000_1176 (size=31161) 2024-12-10T03:34:09,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T03:34:09,975 INFO [Thread-729 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-10T03:34:09,976 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:09,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-10T03:34:09,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T03:34:09,978 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:09,978 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:09,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:10,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T03:34:10,129 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T03:34:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:10,130 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T03:34:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:10,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:10,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:10,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104c6ae5d886d54998925a6939288268c4_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801648454/Put/seqid=0 2024-12-10T03:34:10,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742001_1177 (size=12154) 2024-12-10T03:34:10,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:10,164 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104c6ae5d886d54998925a6939288268c4_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104c6ae5d886d54998925a6939288268c4_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:10,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/30fda6c2b21c4c508e654606838f68c4, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:10,167 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/30fda6c2b21c4c508e654606838f68c4 is 175, key is test_row_0/A:col10/1733801648454/Put/seqid=0 2024-12-10T03:34:10,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742002_1178 (size=30955) 2024-12-10T03:34:10,192 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/f069e0ca24f94456a8e8180ec67f07c5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/f069e0ca24f94456a8e8180ec67f07c5 2024-12-10T03:34:10,202 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/B of d58f0e0e176fadae27aa99803befaf76 into f069e0ca24f94456a8e8180ec67f07c5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:10,202 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:10,203 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/B, priority=13, startTime=1733801649756; duration=0sec 2024-12-10T03:34:10,203 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:10,203 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:B 2024-12-10T03:34:10,203 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:10,207 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:10,207 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/C is initiating minor compaction (all files) 2024-12-10T03:34:10,207 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/C in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:10,207 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/7742142f4ae540b5a270b7a7941344e6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/fcb2bd33594a458cb755e3eb30b81ab1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/65511ae70142469ca391c637009b42e9] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.3 K 2024-12-10T03:34:10,208 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7742142f4ae540b5a270b7a7941344e6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733801646553 2024-12-10T03:34:10,209 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting fcb2bd33594a458cb755e3eb30b81ab1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733801647194 2024-12-10T03:34:10,209 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 65511ae70142469ca391c637009b42e9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733801647817 2024-12-10T03:34:10,211 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b7a87f0021f241c8aada33ee667ae252 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b7a87f0021f241c8aada33ee667ae252 2024-12-10T03:34:10,218 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/A of d58f0e0e176fadae27aa99803befaf76 into b7a87f0021f241c8aada33ee667ae252(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:10,218 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:10,218 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/A, priority=13, startTime=1733801649755; duration=0sec 2024-12-10T03:34:10,218 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:10,218 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:A 2024-12-10T03:34:10,223 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#C#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:10,223 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/00651e294a374c6d91b1f3e5e4e195f7 is 50, key is test_row_0/C:col10/1733801647817/Put/seqid=0 2024-12-10T03:34:10,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742003_1179 (size=12207) 2024-12-10T03:34:10,252 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/00651e294a374c6d91b1f3e5e4e195f7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/00651e294a374c6d91b1f3e5e4e195f7 2024-12-10T03:34:10,259 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/C of d58f0e0e176fadae27aa99803befaf76 into 00651e294a374c6d91b1f3e5e4e195f7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:10,259 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:10,259 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/C, priority=13, startTime=1733801649756; duration=0sec 2024-12-10T03:34:10,259 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:10,259 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:C 2024-12-10T03:34:10,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T03:34:10,573 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/30fda6c2b21c4c508e654606838f68c4 2024-12-10T03:34:10,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:10,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:10,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/88d035d68b7a4156ab3847b3d901685e is 50, key is test_row_0/B:col10/1733801648454/Put/seqid=0 2024-12-10T03:34:10,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T03:34:10,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742004_1180 (size=12001) 2024-12-10T03:34:10,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801710583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801710586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801710586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801710587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801710587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801710690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801710690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801710690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801710690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801710690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801710893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801710894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801710894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801710894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:10,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:10,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801710896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,005 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/88d035d68b7a4156ab3847b3d901685e 2024-12-10T03:34:11,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/406b5f4fe7974a529b204d3ae619e9f7 is 50, key is test_row_0/C:col10/1733801648454/Put/seqid=0 2024-12-10T03:34:11,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742005_1181 (size=12001) 2024-12-10T03:34:11,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T03:34:11,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801711197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801711198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801711198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801711199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801711199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,429 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/406b5f4fe7974a529b204d3ae619e9f7 2024-12-10T03:34:11,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/30fda6c2b21c4c508e654606838f68c4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/30fda6c2b21c4c508e654606838f68c4 2024-12-10T03:34:11,438 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/30fda6c2b21c4c508e654606838f68c4, entries=150, sequenceid=118, filesize=30.2 K 2024-12-10T03:34:11,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/88d035d68b7a4156ab3847b3d901685e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/88d035d68b7a4156ab3847b3d901685e 2024-12-10T03:34:11,443 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/88d035d68b7a4156ab3847b3d901685e, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T03:34:11,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/406b5f4fe7974a529b204d3ae619e9f7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/406b5f4fe7974a529b204d3ae619e9f7 2024-12-10T03:34:11,448 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/406b5f4fe7974a529b204d3ae619e9f7, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T03:34:11,449 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d58f0e0e176fadae27aa99803befaf76 in 1319ms, sequenceid=118, compaction requested=false 2024-12-10T03:34:11,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:11,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:11,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-10T03:34:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-10T03:34:11,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-10T03:34:11,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4720 sec 2024-12-10T03:34:11,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.4750 sec 2024-12-10T03:34:11,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:11,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T03:34:11,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:11,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:11,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:11,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:11,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:11,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:11,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210112a167d10e54ad1ba087ed849478b32_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801650585/Put/seqid=0 2024-12-10T03:34:11,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801711722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801711722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801711723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801711723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801711724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742006_1182 (size=14794) 2024-12-10T03:34:11,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801711827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801711827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801711827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801711828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:11,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:11,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801711828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801712029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801712030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801712031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801712031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801712033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T03:34:12,083 INFO [Thread-729 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-10T03:34:12,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:12,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-10T03:34:12,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T03:34:12,085 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:12,085 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:12,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:12,130 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:12,135 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210112a167d10e54ad1ba087ed849478b32_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210112a167d10e54ad1ba087ed849478b32_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:12,136 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/7b8d31d8ff2c4665b53666047fd8a551, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:12,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/7b8d31d8ff2c4665b53666047fd8a551 is 175, key is test_row_0/A:col10/1733801650585/Put/seqid=0 2024-12-10T03:34:12,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742007_1183 (size=39749) 2024-12-10T03:34:12,143 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=138, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/7b8d31d8ff2c4665b53666047fd8a551 2024-12-10T03:34:12,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1aa8bf3b0b064781a82e6922238af60f is 50, key is test_row_0/B:col10/1733801650585/Put/seqid=0 2024-12-10T03:34:12,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742008_1184 (size=12151) 2024-12-10T03:34:12,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T03:34:12,236 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T03:34:12,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:12,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801712332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801712333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801712335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801712336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801712336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T03:34:12,389 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T03:34:12,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:12,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,390 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,542 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,542 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T03:34:12,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:12,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1aa8bf3b0b064781a82e6922238af60f 2024-12-10T03:34:12,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/f8e831e3d3264757addda0ee6120dac8 is 50, key is test_row_0/C:col10/1733801650585/Put/seqid=0 2024-12-10T03:34:12,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742009_1185 (size=12151) 2024-12-10T03:34:12,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T03:34:12,694 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T03:34:12,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
as already flushing 2024-12-10T03:34:12,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801712835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801712837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801712838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801712838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:12,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801712840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,847 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:12,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T03:34:12,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:12,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:12,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/f8e831e3d3264757addda0ee6120dac8 2024-12-10T03:34:12,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/7b8d31d8ff2c4665b53666047fd8a551 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/7b8d31d8ff2c4665b53666047fd8a551 2024-12-10T03:34:12,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/7b8d31d8ff2c4665b53666047fd8a551, entries=200, sequenceid=138, filesize=38.8 K 2024-12-10T03:34:12,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1aa8bf3b0b064781a82e6922238af60f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1aa8bf3b0b064781a82e6922238af60f 2024-12-10T03:34:12,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1aa8bf3b0b064781a82e6922238af60f, entries=150, 
sequenceid=138, filesize=11.9 K 2024-12-10T03:34:12,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/f8e831e3d3264757addda0ee6120dac8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/f8e831e3d3264757addda0ee6120dac8 2024-12-10T03:34:12,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/f8e831e3d3264757addda0ee6120dac8, entries=150, sequenceid=138, filesize=11.9 K 2024-12-10T03:34:12,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for d58f0e0e176fadae27aa99803befaf76 in 1283ms, sequenceid=138, compaction requested=true 2024-12-10T03:34:12,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:12,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:12,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:12,991 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:12,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:12,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:12,991 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:12,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:12,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:12,992 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:12,992 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:12,992 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] 
regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/A is initiating minor compaction (all files) 2024-12-10T03:34:12,992 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/B is initiating minor compaction (all files) 2024-12-10T03:34:12,992 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/A in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,992 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b7a87f0021f241c8aada33ee667ae252, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/30fda6c2b21c4c508e654606838f68c4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/7b8d31d8ff2c4665b53666047fd8a551] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=99.5 K 2024-12-10T03:34:12,992 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:12,992 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b7a87f0021f241c8aada33ee667ae252, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/30fda6c2b21c4c508e654606838f68c4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/7b8d31d8ff2c4665b53666047fd8a551] 2024-12-10T03:34:12,992 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/B in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:12,992 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/f069e0ca24f94456a8e8180ec67f07c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/88d035d68b7a4156ab3847b3d901685e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1aa8bf3b0b064781a82e6922238af60f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.5 K 2024-12-10T03:34:12,993 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7a87f0021f241c8aada33ee667ae252, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733801647817 2024-12-10T03:34:12,993 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f069e0ca24f94456a8e8180ec67f07c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733801647817 2024-12-10T03:34:12,993 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30fda6c2b21c4c508e654606838f68c4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801648452 2024-12-10T03:34:12,993 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 88d035d68b7a4156ab3847b3d901685e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801648452 2024-12-10T03:34:12,993 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b8d31d8ff2c4665b53666047fd8a551, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733801650585 2024-12-10T03:34:12,993 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1aa8bf3b0b064781a82e6922238af60f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733801650585 2024-12-10T03:34:12,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T03:34:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:13,000 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T03:34:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:13,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:13,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:13,008 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:13,008 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#B#compaction#159 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:13,009 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/65d703d99eaf452a991cc7b5c0f3869a is 50, key is test_row_0/B:col10/1733801650585/Put/seqid=0 2024-12-10T03:34:13,012 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210959d7f3e60d64154a5a6beda2e82b5cf_d58f0e0e176fadae27aa99803befaf76 store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:13,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210521e6dd8369b42e0b061da5d232a8b38_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801651723/Put/seqid=0 2024-12-10T03:34:13,014 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210959d7f3e60d64154a5a6beda2e82b5cf_d58f0e0e176fadae27aa99803befaf76, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:13,014 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210959d7f3e60d64154a5a6beda2e82b5cf_d58f0e0e176fadae27aa99803befaf76 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:13,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742010_1186 (size=12459) 2024-12-10T03:34:13,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742011_1187 (size=12304) 2024-12-10T03:34:13,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742012_1188 (size=4469) 2024-12-10T03:34:13,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T03:34:13,429 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/65d703d99eaf452a991cc7b5c0f3869a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/65d703d99eaf452a991cc7b5c0f3869a 2024-12-10T03:34:13,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:13,433 INFO 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210521e6dd8369b42e0b061da5d232a8b38_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210521e6dd8369b42e0b061da5d232a8b38_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:13,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/3b9a6f769c514f4fb0b98f36336b118d, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:13,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/3b9a6f769c514f4fb0b98f36336b118d is 175, key is test_row_0/A:col10/1733801651723/Put/seqid=0 2024-12-10T03:34:13,436 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/B of d58f0e0e176fadae27aa99803befaf76 into 65d703d99eaf452a991cc7b5c0f3869a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:13,436 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:13,436 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/B, priority=13, startTime=1733801652991; duration=0sec 2024-12-10T03:34:13,436 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:13,436 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:B 2024-12-10T03:34:13,436 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:13,437 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#A#compaction#160 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:13,438 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:13,438 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/61769daff09043edaae5350d7eb69f3a is 175, key is test_row_0/A:col10/1733801650585/Put/seqid=0 2024-12-10T03:34:13,438 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/C is initiating minor compaction (all files) 2024-12-10T03:34:13,438 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/C in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:13,438 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/00651e294a374c6d91b1f3e5e4e195f7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/406b5f4fe7974a529b204d3ae619e9f7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/f8e831e3d3264757addda0ee6120dac8] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.5 K 2024-12-10T03:34:13,439 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 00651e294a374c6d91b1f3e5e4e195f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733801647817 2024-12-10T03:34:13,439 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 406b5f4fe7974a529b204d3ae619e9f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801648452 2024-12-10T03:34:13,440 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f8e831e3d3264757addda0ee6120dac8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733801650585 2024-12-10T03:34:13,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742013_1189 (size=31105) 2024-12-10T03:34:13,450 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/3b9a6f769c514f4fb0b98f36336b118d 2024-12-10T03:34:13,453 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d58f0e0e176fadae27aa99803befaf76#C#compaction#162 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:13,454 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/23a632373ca5467eb7721451246755b9 is 50, key is test_row_0/C:col10/1733801650585/Put/seqid=0 2024-12-10T03:34:13,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742014_1190 (size=31413) 2024-12-10T03:34:13,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/30648c6aa93947dc8a2e6bc15043530d is 50, key is test_row_0/B:col10/1733801651723/Put/seqid=0 2024-12-10T03:34:13,464 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/61769daff09043edaae5350d7eb69f3a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/61769daff09043edaae5350d7eb69f3a 2024-12-10T03:34:13,471 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/A of d58f0e0e176fadae27aa99803befaf76 into 61769daff09043edaae5350d7eb69f3a(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:13,471 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:13,472 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/A, priority=13, startTime=1733801652990; duration=0sec 2024-12-10T03:34:13,472 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:13,472 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:A 2024-12-10T03:34:13,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742015_1191 (size=12459) 2024-12-10T03:34:13,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742016_1192 (size=12151) 2024-12-10T03:34:13,479 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/30648c6aa93947dc8a2e6bc15043530d 2024-12-10T03:34:13,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/3caa96415a1a414682e590e436d24a36 is 50, key is test_row_0/C:col10/1733801651723/Put/seqid=0 2024-12-10T03:34:13,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742017_1193 (size=12151) 2024-12-10T03:34:13,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:13,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801713850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801713850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801713851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801713852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801713853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,880 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/23a632373ca5467eb7721451246755b9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/23a632373ca5467eb7721451246755b9 2024-12-10T03:34:13,884 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/C of d58f0e0e176fadae27aa99803befaf76 into 23a632373ca5467eb7721451246755b9(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:13,884 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:13,884 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/C, priority=13, startTime=1733801652991; duration=0sec 2024-12-10T03:34:13,884 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:13,884 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:C 2024-12-10T03:34:13,895 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/3caa96415a1a414682e590e436d24a36 2024-12-10T03:34:13,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/3b9a6f769c514f4fb0b98f36336b118d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/3b9a6f769c514f4fb0b98f36336b118d 2024-12-10T03:34:13,903 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/3b9a6f769c514f4fb0b98f36336b118d, entries=150, sequenceid=157, filesize=30.4 K 2024-12-10T03:34:13,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/30648c6aa93947dc8a2e6bc15043530d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/30648c6aa93947dc8a2e6bc15043530d 2024-12-10T03:34:13,908 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/30648c6aa93947dc8a2e6bc15043530d, entries=150, sequenceid=157, filesize=11.9 K 2024-12-10T03:34:13,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/3caa96415a1a414682e590e436d24a36 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/3caa96415a1a414682e590e436d24a36 2024-12-10T03:34:13,913 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/3caa96415a1a414682e590e436d24a36, entries=150, sequenceid=157, filesize=11.9 K 2024-12-10T03:34:13,914 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for d58f0e0e176fadae27aa99803befaf76 in 914ms, sequenceid=157, compaction requested=false 2024-12-10T03:34:13,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:13,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:13,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-10T03:34:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-10T03:34:13,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-10T03:34:13,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8300 sec 2024-12-10T03:34:13,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.8340 sec 2024-12-10T03:34:13,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:13,956 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-10T03:34:13,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:13,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:13,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:13,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:13,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:13,957 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:13,964 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106f92ade53fee45cc85161fc0d67b335c_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801653955/Put/seqid=0 2024-12-10T03:34:13,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742018_1194 (size=12304) 2024-12-10T03:34:13,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801713966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801713967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801713968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801713968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:13,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:13,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801713968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801714071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801714071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801714072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801714072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801714072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T03:34:14,189 INFO [Thread-729 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-10T03:34:14,189 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:14,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-10T03:34:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T03:34:14,191 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:14,191 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:14,191 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:14,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801714273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801714273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801714273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801714274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801714274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T03:34:14,342 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:14,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:14,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,369 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:14,373 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106f92ade53fee45cc85161fc0d67b335c_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106f92ade53fee45cc85161fc0d67b335c_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:14,373 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b629cdf790e34b34924b0e8649c3318b, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:14,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b629cdf790e34b34924b0e8649c3318b is 175, key is test_row_0/A:col10/1733801653955/Put/seqid=0 2024-12-10T03:34:14,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742019_1195 (size=31105) 2024-12-10T03:34:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T03:34:14,495 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:14,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:14,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:14,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801714574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801714574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801714576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801714578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801714578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,783 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=180, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b629cdf790e34b34924b0e8649c3318b 2024-12-10T03:34:14,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1f2d69ab635d47baac2dad551139896e is 50, key is test_row_0/B:col10/1733801653955/Put/seqid=0 2024-12-10T03:34:14,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T03:34:14,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742020_1196 (size=12151) 2024-12-10T03:34:14,800 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:14,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
as already flushing 2024-12-10T03:34:14,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,957 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:14,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:14,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:14,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:14,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:14,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:15,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801715079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801715081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801715081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801715081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801715083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,109 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:15,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:15,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1f2d69ab635d47baac2dad551139896e 2024-12-10T03:34:15,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/4048f7dd3e8c4b83bbd74a3b9e9b567f is 50, key is test_row_0/C:col10/1733801653955/Put/seqid=0 2024-12-10T03:34:15,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742021_1197 (size=12151) 2024-12-10T03:34:15,261 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:15,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
as already flushing 2024-12-10T03:34:15,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T03:34:15,413 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,566 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:15,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:15,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:15,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/4048f7dd3e8c4b83bbd74a3b9e9b567f 2024-12-10T03:34:15,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b629cdf790e34b34924b0e8649c3318b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b629cdf790e34b34924b0e8649c3318b 2024-12-10T03:34:15,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b629cdf790e34b34924b0e8649c3318b, entries=150, sequenceid=180, filesize=30.4 K 2024-12-10T03:34:15,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1f2d69ab635d47baac2dad551139896e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f2d69ab635d47baac2dad551139896e 2024-12-10T03:34:15,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f2d69ab635d47baac2dad551139896e, entries=150, 
sequenceid=180, filesize=11.9 K 2024-12-10T03:34:15,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/4048f7dd3e8c4b83bbd74a3b9e9b567f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4048f7dd3e8c4b83bbd74a3b9e9b567f 2024-12-10T03:34:15,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4048f7dd3e8c4b83bbd74a3b9e9b567f, entries=150, sequenceid=180, filesize=11.9 K 2024-12-10T03:34:15,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for d58f0e0e176fadae27aa99803befaf76 in 1675ms, sequenceid=180, compaction requested=true 2024-12-10T03:34:15,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:15,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:15,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:15,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:15,631 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:15,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:15,631 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:15,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:15,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:15,632 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:15,632 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:15,632 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] 
regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/B is initiating minor compaction (all files) 2024-12-10T03:34:15,632 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/A is initiating minor compaction (all files) 2024-12-10T03:34:15,632 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/B in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,632 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/A in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,632 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/65d703d99eaf452a991cc7b5c0f3869a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/30648c6aa93947dc8a2e6bc15043530d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f2d69ab635d47baac2dad551139896e] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.9 K 2024-12-10T03:34:15,632 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/61769daff09043edaae5350d7eb69f3a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/3b9a6f769c514f4fb0b98f36336b118d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b629cdf790e34b34924b0e8649c3318b] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=91.4 K 2024-12-10T03:34:15,632 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:15,633 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/61769daff09043edaae5350d7eb69f3a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/3b9a6f769c514f4fb0b98f36336b118d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b629cdf790e34b34924b0e8649c3318b] 2024-12-10T03:34:15,633 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 65d703d99eaf452a991cc7b5c0f3869a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733801650585 2024-12-10T03:34:15,633 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61769daff09043edaae5350d7eb69f3a, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733801650585 2024-12-10T03:34:15,633 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 30648c6aa93947dc8a2e6bc15043530d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733801651722 2024-12-10T03:34:15,633 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f2d69ab635d47baac2dad551139896e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733801653954 2024-12-10T03:34:15,633 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b9a6f769c514f4fb0b98f36336b118d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733801651722 2024-12-10T03:34:15,634 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b629cdf790e34b34924b0e8649c3318b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733801653954 2024-12-10T03:34:15,640 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:15,643 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#B#compaction#169 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:15,643 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/c3bb3adad12347f89031acc5117d36a9 is 50, key is test_row_0/B:col10/1733801653955/Put/seqid=0 2024-12-10T03:34:15,648 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412103df9ea39d8a54ad3abc810198787b8f4_d58f0e0e176fadae27aa99803befaf76 store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:15,650 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412103df9ea39d8a54ad3abc810198787b8f4_d58f0e0e176fadae27aa99803befaf76, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:15,650 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103df9ea39d8a54ad3abc810198787b8f4_d58f0e0e176fadae27aa99803befaf76 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:15,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742022_1198 (size=12561) 2024-12-10T03:34:15,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742023_1199 (size=4469) 2024-12-10T03:34:15,719 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:15,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T03:34:15,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
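The recurring RegionTooBusyException entries above ("Over memstore limit=512.0 K") show the region rejecting writes while its memstore sits over the blocking threshold, until the in-flight flush brings it back down. In stock HBase that threshold is derived from the per-region flush size multiplied by a blocking multiplier; the unusually small 512 K figure suggests this test lowers the flush size on purpose. The following is only a minimal illustrative sketch of how those two settings combine — the concrete values are assumptions, not the configuration used by this test run.

// Illustrative sketch (not part of the captured log): how the memstore blocking
// limit is typically derived from configuration. Values below are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold (default-style value, illustrative only).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Multiplier applied to the flush size to get the blocking limit.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit =
            conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // Once a region's memstore exceeds this limit, puts are rejected with
        // RegionTooBusyException (as in the WARN entries above) until a flush completes.
        System.out.println("Memstore blocking limit (bytes): " + blockingLimit);
    }
}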
2024-12-10T03:34:15,719 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T03:34:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:15,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:15,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d3f15c53ecc54a43b4cec7ad0ad7043c_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801653966/Put/seqid=0 2024-12-10T03:34:15,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742024_1200 (size=12304) 2024-12-10T03:34:16,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:16,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:16,089 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#A#compaction#168 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:16,090 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/9086b712be114724ac2f7bdf02280372 is 175, key is test_row_0/A:col10/1733801653955/Put/seqid=0 2024-12-10T03:34:16,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742025_1201 (size=31515) 2024-12-10T03:34:16,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801716098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801716099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,103 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/c3bb3adad12347f89031acc5117d36a9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/c3bb3adad12347f89031acc5117d36a9 2024-12-10T03:34:16,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801716100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,106 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/9086b712be114724ac2f7bdf02280372 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9086b712be114724ac2f7bdf02280372 2024-12-10T03:34:16,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801716102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801716102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,109 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/B of d58f0e0e176fadae27aa99803befaf76 into c3bb3adad12347f89031acc5117d36a9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
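The compaction entries around this point show the flush completing and ExploringCompactionPolicy selecting all three eligible store files of each column family for a minor compaction, driven automatically by the flush. For reference only, a compaction of the same table can also be requested explicitly through the public Admin API; the sketch below is illustrative, not part of the test, and assumes a reachable cluster hosting the TestAcidGuarantees table.

// Illustrative sketch (not part of the captured log): requesting a compaction of
// the test table via the Admin API. The server-side compaction policy still
// decides which store files are actually merged, as the log above shows.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Ask the region servers to run a (minor) compaction for every region of the table.
            admin.compact(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}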
2024-12-10T03:34:16,109 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:16,109 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/B, priority=13, startTime=1733801655631; duration=0sec 2024-12-10T03:34:16,109 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:16,109 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:B 2024-12-10T03:34:16,110 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:16,111 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:16,111 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/C is initiating minor compaction (all files) 2024-12-10T03:34:16,111 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/C in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:16,111 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/23a632373ca5467eb7721451246755b9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/3caa96415a1a414682e590e436d24a36, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4048f7dd3e8c4b83bbd74a3b9e9b567f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=35.9 K 2024-12-10T03:34:16,112 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 23a632373ca5467eb7721451246755b9, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733801650585 2024-12-10T03:34:16,112 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 3caa96415a1a414682e590e436d24a36, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733801651722 2024-12-10T03:34:16,113 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4048f7dd3e8c4b83bbd74a3b9e9b567f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733801653954 2024-12-10T03:34:16,114 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in d58f0e0e176fadae27aa99803befaf76/A of d58f0e0e176fadae27aa99803befaf76 into 9086b712be114724ac2f7bdf02280372(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:16,114 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:16,114 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/A, priority=13, startTime=1733801655631; duration=0sec 2024-12-10T03:34:16,114 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:16,114 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:A 2024-12-10T03:34:16,121 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#C#compaction#171 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:16,122 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/ab9b8856e1104fc6acfe5b48ffd56f1b is 50, key is test_row_0/C:col10/1733801653955/Put/seqid=0 2024-12-10T03:34:16,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742026_1202 (size=12561) 2024-12-10T03:34:16,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:16,136 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d3f15c53ecc54a43b4cec7ad0ad7043c_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d3f15c53ecc54a43b4cec7ad0ad7043c_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:16,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b303d00f40884aec8e8fda5364c3b76b, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:16,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b303d00f40884aec8e8fda5364c3b76b is 175, key is test_row_0/A:col10/1733801653966/Put/seqid=0 2024-12-10T03:34:16,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742027_1203 (size=31105) 2024-12-10T03:34:16,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801716203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801716203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801716205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801716207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801716207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T03:34:16,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801716406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801716406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801716407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801716409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801716410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,534 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/ab9b8856e1104fc6acfe5b48ffd56f1b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ab9b8856e1104fc6acfe5b48ffd56f1b 2024-12-10T03:34:16,539 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/C of d58f0e0e176fadae27aa99803befaf76 into ab9b8856e1104fc6acfe5b48ffd56f1b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:16,539 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:16,539 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/C, priority=13, startTime=1733801655631; duration=0sec 2024-12-10T03:34:16,539 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:16,539 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:C 2024-12-10T03:34:16,542 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b303d00f40884aec8e8fda5364c3b76b 2024-12-10T03:34:16,546 INFO [master/50b9ef1c5472:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-10T03:34:16,546 INFO [master/50b9ef1c5472:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-10T03:34:16,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d2f2af5a3f574f4faa482d679c06aebf is 50, key is test_row_0/B:col10/1733801653966/Put/seqid=0 2024-12-10T03:34:16,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742028_1204 (size=12151) 2024-12-10T03:34:16,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801716709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801716709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801716711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801716713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:16,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801716713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:16,954 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d2f2af5a3f574f4faa482d679c06aebf 2024-12-10T03:34:16,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/69342da4f9b4404bbb0d44337407f024 is 50, key is test_row_0/C:col10/1733801653966/Put/seqid=0 2024-12-10T03:34:16,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742029_1205 (size=12151) 2024-12-10T03:34:17,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:17,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801717211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:17,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:17,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801717212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:17,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:17,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801717213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:17,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:17,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801717214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:17,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:17,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801717219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:17,367 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/69342da4f9b4404bbb0d44337407f024 2024-12-10T03:34:17,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/b303d00f40884aec8e8fda5364c3b76b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b303d00f40884aec8e8fda5364c3b76b 2024-12-10T03:34:17,377 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b303d00f40884aec8e8fda5364c3b76b, entries=150, sequenceid=196, filesize=30.4 K 2024-12-10T03:34:17,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d2f2af5a3f574f4faa482d679c06aebf as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d2f2af5a3f574f4faa482d679c06aebf 2024-12-10T03:34:17,382 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d2f2af5a3f574f4faa482d679c06aebf, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T03:34:17,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/69342da4f9b4404bbb0d44337407f024 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/69342da4f9b4404bbb0d44337407f024 2024-12-10T03:34:17,387 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/69342da4f9b4404bbb0d44337407f024, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T03:34:17,388 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for d58f0e0e176fadae27aa99803befaf76 in 1669ms, sequenceid=196, compaction requested=false 2024-12-10T03:34:17,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:17,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:17,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-10T03:34:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-10T03:34:17,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-10T03:34:17,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1990 sec 2024-12-10T03:34:17,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 3.2010 sec 2024-12-10T03:34:18,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:18,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T03:34:18,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:18,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:18,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:18,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:18,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:18,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:18,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121010129af3f5614b4e94352237305e3f7f_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801658217/Put/seqid=0 2024-12-10T03:34:18,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801718226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801718227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742030_1206 (size=12304) 2024-12-10T03:34:18,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801718227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801718228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801718228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T03:34:18,296 INFO [Thread-729 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-10T03:34:18,297 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:18,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-10T03:34:18,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T03:34:18,298 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:18,298 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:18,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:18,331 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801718329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801718329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801718330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801718330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801718330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T03:34:18,449 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:18,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:18,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801718533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801718533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801718534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801718534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801718534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T03:34:18,601 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,630 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:18,633 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121010129af3f5614b4e94352237305e3f7f_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121010129af3f5614b4e94352237305e3f7f_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:18,634 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1b87e757d47942b4bf135e38e7c8886b, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:18,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1b87e757d47942b4bf135e38e7c8886b is 175, key is test_row_0/A:col10/1733801658217/Put/seqid=0 2024-12-10T03:34:18,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742031_1207 (size=31105) 2024-12-10T03:34:18,638 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=221, memsize=42.5 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1b87e757d47942b4bf135e38e7c8886b 2024-12-10T03:34:18,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/fba5b3c8768e4bf3bc08ff05190d9b0d is 50, key is test_row_0/B:col10/1733801658217/Put/seqid=0 2024-12-10T03:34:18,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742032_1208 (size=12151) 2024-12-10T03:34:18,753 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:18,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:18,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801718834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801718836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801718836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801718837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801718837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T03:34:18,906 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:18,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:18,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
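Editor's note: the repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (reported here as "Over memstore limit=512.0 K"). That limit is the flush size multiplied by the block multiplier. A minimal sketch of the two settings involved; the concrete values are assumptions chosen only to reproduce a 512 K limit, not values read from this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold; a small value is assumed here
    // purely to mirror the tiny 512.0 K limit seen in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // checkResources() throws RegionTooBusyException once the memstore
    // exceeds flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit =
        conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}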
2024-12-10T03:34:18,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/fba5b3c8768e4bf3bc08ff05190d9b0d 2024-12-10T03:34:19,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/98cf934235984572876d95d9d0944310 is 50, key is test_row_0/C:col10/1733801658217/Put/seqid=0 2024-12-10T03:34:19,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742033_1209 (size=12151) 2024-12-10T03:34:19,058 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:19,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:19,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
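Editor's note: the pid=54 failures above are the expected outcome of requesting a flush while one is already running: FlushRegionCallable logs "NOT flushing ... as already flushing", throws IOException, and the master keeps re-dispatching the remote procedure until the in-flight flush finishes. On the client side this sequence is typically driven by an admin flush request; a minimal sketch, assuming a local connection and using the table name from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this build the
      // request runs as a master procedure (cf. "Checking to see if procedure
      // is done pid=53" above), and the client polls until it is reported done.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}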
2024-12-10T03:34:19,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:19,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:19,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:19,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801719338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:19,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801719339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:19,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801719339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:19,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801719339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:19,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801719342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,366 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:19,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:19,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:19,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T03:34:19,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/98cf934235984572876d95d9d0944310 2024-12-10T03:34:19,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1b87e757d47942b4bf135e38e7c8886b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1b87e757d47942b4bf135e38e7c8886b 2024-12-10T03:34:19,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1b87e757d47942b4bf135e38e7c8886b, entries=150, sequenceid=221, filesize=30.4 K 2024-12-10T03:34:19,471 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/fba5b3c8768e4bf3bc08ff05190d9b0d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fba5b3c8768e4bf3bc08ff05190d9b0d 2024-12-10T03:34:19,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fba5b3c8768e4bf3bc08ff05190d9b0d, entries=150, sequenceid=221, filesize=11.9 K 2024-12-10T03:34:19,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/98cf934235984572876d95d9d0944310 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/98cf934235984572876d95d9d0944310 2024-12-10T03:34:19,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/98cf934235984572876d95d9d0944310, entries=150, sequenceid=221, filesize=11.9 K 2024-12-10T03:34:19,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for d58f0e0e176fadae27aa99803befaf76 in 1263ms, sequenceid=221, compaction requested=true 2024-12-10T03:34:19,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:19,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:19,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:19,481 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:19,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:19,481 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:19,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:19,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:19,481 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:19,482 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:19,482 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/A is initiating minor 
compaction (all files) 2024-12-10T03:34:19,482 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/A in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,482 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9086b712be114724ac2f7bdf02280372, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b303d00f40884aec8e8fda5364c3b76b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1b87e757d47942b4bf135e38e7c8886b] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=91.5 K 2024-12-10T03:34:19,483 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,483 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9086b712be114724ac2f7bdf02280372, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b303d00f40884aec8e8fda5364c3b76b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1b87e757d47942b4bf135e38e7c8886b] 2024-12-10T03:34:19,483 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9086b712be114724ac2f7bdf02280372, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733801653954 2024-12-10T03:34:19,483 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b303d00f40884aec8e8fda5364c3b76b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801653964 2024-12-10T03:34:19,483 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b87e757d47942b4bf135e38e7c8886b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733801656100 2024-12-10T03:34:19,487 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:19,487 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/B is initiating minor compaction (all files) 
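Editor's note: the SortedCompactionPolicy / ExploringCompactionPolicy lines above show per-store selection at work: three eligible files per store, write blocking at 16 store files, and one minor compaction queued per column family after the flush. A minimal sketch of the standard settings behind those numbers; the values shown are the usual defaults, stated as assumptions rather than read from this cluster's site configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum / maximum number of store files considered per minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used by the exploring policy: a file stays in a candidate set
    // roughly while it is no larger than ratio * (sum of the other candidates).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Matches the "16 blocking" figure logged above: updates are delayed once
    // a single store accumulates this many files.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println(conf.getInt("hbase.hstore.compaction.min", 3));
  }
}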
2024-12-10T03:34:19,487 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/B in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,487 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/c3bb3adad12347f89031acc5117d36a9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d2f2af5a3f574f4faa482d679c06aebf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fba5b3c8768e4bf3bc08ff05190d9b0d] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=36.0 K 2024-12-10T03:34:19,488 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c3bb3adad12347f89031acc5117d36a9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733801653954 2024-12-10T03:34:19,489 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:19,490 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d2f2af5a3f574f4faa482d679c06aebf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801653964 2024-12-10T03:34:19,490 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting fba5b3c8768e4bf3bc08ff05190d9b0d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733801656100 2024-12-10T03:34:19,492 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210a4ed41e4b2804935b9b82beba043ea16_d58f0e0e176fadae27aa99803befaf76 store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:19,494 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210a4ed41e4b2804935b9b82beba043ea16_d58f0e0e176fadae27aa99803befaf76, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:19,494 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a4ed41e4b2804935b9b82beba043ea16_d58f0e0e176fadae27aa99803befaf76 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:19,498 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#B#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:19,499 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1f33c36a66e04a869823e0cebe972e05 is 50, key is test_row_0/B:col10/1733801658217/Put/seqid=0 2024-12-10T03:34:19,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742035_1211 (size=12663) 2024-12-10T03:34:19,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742034_1210 (size=4469) 2024-12-10T03:34:19,513 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#A#compaction#177 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:19,514 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/37535a4c92294f60bdc24278878116d4 is 175, key is test_row_0/A:col10/1733801658217/Put/seqid=0 2024-12-10T03:34:19,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742036_1212 (size=31617) 2024-12-10T03:34:19,519 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:19,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T03:34:19,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
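Editor's note: the flush and compaction writers above log the cells they persist ("key is test_row_0/B:col10/...", entries=150 in each of A, B and C), which reveals the shape of the writes the test issues: a single row, qualifiers like col10, and matching cells across the three families. A minimal client-side sketch of such a put; the connection setup and the value are assumptions for illustration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("value-0"); // placeholder value, not taken from the test
      // One cell per family so a reader can check the whole row stays consistent.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      // May be rejected with RegionTooBusyException while the region is over
      // its memstore blocking limit, as seen earlier in this log.
      table.put(put);
    }
  }
}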
2024-12-10T03:34:19,520 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T03:34:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:19,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:19,527 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/37535a4c92294f60bdc24278878116d4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/37535a4c92294f60bdc24278878116d4 2024-12-10T03:34:19,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106f7dbfa486a7490fa46d46df909e8bd2_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801658226/Put/seqid=0 2024-12-10T03:34:19,535 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/A of d58f0e0e176fadae27aa99803befaf76 into 37535a4c92294f60bdc24278878116d4(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
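Editor's note: the mob.DefaultMobStoreCompactor lines and the mobdir/.tmp paths above indicate that family A is MOB-enabled: values above a size threshold would be written to separate MOB files, and here the MOB writer is aborted because every cell (about 50 bytes per the HFileWriterImpl lines) stays below it. A minimal sketch of declaring such a family; the threshold value is an assumption for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static void main(String[] args) {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)   // cells above the threshold go to separate MOB files
            .setMobThreshold(100L) // assumed threshold; the ~50-byte cells here stay below it
            .build())
        .build();
    System.out.println(desc);
  }
}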
2024-12-10T03:34:19,535 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:19,535 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/A, priority=13, startTime=1733801659481; duration=0sec 2024-12-10T03:34:19,535 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:19,535 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:A 2024-12-10T03:34:19,535 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:19,536 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:19,537 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/C is initiating minor compaction (all files) 2024-12-10T03:34:19,537 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/C in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:19,537 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ab9b8856e1104fc6acfe5b48ffd56f1b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/69342da4f9b4404bbb0d44337407f024, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/98cf934235984572876d95d9d0944310] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=36.0 K 2024-12-10T03:34:19,537 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab9b8856e1104fc6acfe5b48ffd56f1b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733801653954 2024-12-10T03:34:19,537 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69342da4f9b4404bbb0d44337407f024, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801653964 2024-12-10T03:34:19,538 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98cf934235984572876d95d9d0944310, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733801656100 2024-12-10T03:34:19,544 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#C#compaction#180 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:19,545 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/ee1d3ef879af4b71becb56b444198dc7 is 50, key is test_row_0/C:col10/1733801658217/Put/seqid=0 2024-12-10T03:34:19,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742037_1213 (size=12304) 2024-12-10T03:34:19,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742038_1214 (size=12663) 2024-12-10T03:34:19,915 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/1f33c36a66e04a869823e0cebe972e05 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f33c36a66e04a869823e0cebe972e05 2024-12-10T03:34:19,919 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/B of d58f0e0e176fadae27aa99803befaf76 into 1f33c36a66e04a869823e0cebe972e05(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
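The PressureAwareThroughputController entry above reports a 50.00 MB/second total limit on compaction writes. That ceiling comes from the region server configuration; the sketch below shows how the bounds are commonly set programmatically. The property names and the 100 MB / 50 MB values are assumptions based on typical HBase defaults, not values read from this test run's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Upper and lower bounds (bytes/sec) between which the pressure-aware
        // controller scales the allowed compaction write rate. Values here are
        // illustrative only.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println("compaction throughput upper bound = "
            + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0L) + " bytes/sec");
    }
}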
2024-12-10T03:34:19,919 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:19,919 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/B, priority=13, startTime=1733801659481; duration=0sec 2024-12-10T03:34:19,919 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:19,919 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:B 2024-12-10T03:34:19,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:19,950 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106f7dbfa486a7490fa46d46df909e8bd2_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106f7dbfa486a7490fa46d46df909e8bd2_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:19,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1ddfd1e810dc4b10abc19d3fef698883, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:19,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1ddfd1e810dc4b10abc19d3fef698883 is 175, key is test_row_0/A:col10/1733801658226/Put/seqid=0 2024-12-10T03:34:19,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742039_1215 (size=31105) 2024-12-10T03:34:19,965 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/ee1d3ef879af4b71becb56b444198dc7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ee1d3ef879af4b71becb56b444198dc7 2024-12-10T03:34:19,970 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/C of 
d58f0e0e176fadae27aa99803befaf76 into ee1d3ef879af4b71becb56b444198dc7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:19,970 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:19,970 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/C, priority=13, startTime=1733801659481; duration=0sec 2024-12-10T03:34:19,970 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:19,970 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:C 2024-12-10T03:34:20,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:20,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:20,355 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1ddfd1e810dc4b10abc19d3fef698883 2024-12-10T03:34:20,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801720353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801720355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801720355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801720356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801720356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d337e6cefcb1420eb376b97f77525d89 is 50, key is test_row_0/B:col10/1733801658226/Put/seqid=0 2024-12-10T03:34:20,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742040_1216 (size=12151) 2024-12-10T03:34:20,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T03:34:20,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801720457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801720458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801720458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801720459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801720459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801720660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801720660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801720660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801720661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801720662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,766 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d337e6cefcb1420eb376b97f77525d89 2024-12-10T03:34:20,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/32f9047f2ca7441ea4d28fa133d10512 is 50, key is test_row_0/C:col10/1733801658226/Put/seqid=0 2024-12-10T03:34:20,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742041_1217 (size=12151) 2024-12-10T03:34:20,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801720962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801720963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801720965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801720965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:20,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:20,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801720965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,177 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/32f9047f2ca7441ea4d28fa133d10512 2024-12-10T03:34:21,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/1ddfd1e810dc4b10abc19d3fef698883 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1ddfd1e810dc4b10abc19d3fef698883 2024-12-10T03:34:21,211 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1ddfd1e810dc4b10abc19d3fef698883, entries=150, sequenceid=236, filesize=30.4 K 2024-12-10T03:34:21,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d337e6cefcb1420eb376b97f77525d89 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d337e6cefcb1420eb376b97f77525d89 2024-12-10T03:34:21,215 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d337e6cefcb1420eb376b97f77525d89, entries=150, sequenceid=236, filesize=11.9 K 2024-12-10T03:34:21,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/32f9047f2ca7441ea4d28fa133d10512 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/32f9047f2ca7441ea4d28fa133d10512 2024-12-10T03:34:21,219 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/32f9047f2ca7441ea4d28fa133d10512, entries=150, sequenceid=236, filesize=11.9 K 2024-12-10T03:34:21,220 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for d58f0e0e176fadae27aa99803befaf76 in 1701ms, sequenceid=236, compaction requested=false 2024-12-10T03:34:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
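At this point the flush at sequenceid=236 has completed, but while it was running every Mutate call against the region was rejected with RegionTooBusyException because the memstore was over its 512.0 K blocking limit. The HBase client normally absorbs such rejections with its own retry logic; the sketch below shows the equivalent manual handling, assuming the exception propagates to the caller. The row, family, and qualifier are taken from the log; the value and the retry parameters are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    // Rejected with RegionTooBusyException while the region is
                    // over its memstore blocking limit.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    // Wait for the flush in progress to drain the memstore,
                    // then retry with exponential backoff.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}

Backoff works here because the rejection is transient: once the in-progress flush completes, the region accepts writes again, as the later log records show.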
2024-12-10T03:34:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-10T03:34:21,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-10T03:34:21,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-10T03:34:21,222 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9230 sec 2024-12-10T03:34:21,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.9260 sec 2024-12-10T03:34:21,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:21,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T03:34:21,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:21,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:21,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:21,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:21,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:21,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:21,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801721475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801721476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801721476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801721476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801721476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210759b2e6ed4894127ac1aa04f584e1819_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801661464/Put/seqid=0 2024-12-10T03:34:21,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742042_1218 (size=12454) 2024-12-10T03:34:21,492 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,496 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210759b2e6ed4894127ac1aa04f584e1819_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210759b2e6ed4894127ac1aa04f584e1819_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:21,497 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/9f97a6a6a2d144d0b9e070fcf85a3d48, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:21,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/9f97a6a6a2d144d0b9e070fcf85a3d48 is 175, key is test_row_0/A:col10/1733801661464/Put/seqid=0 2024-12-10T03:34:21,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742043_1219 (size=31255) 2024-12-10T03:34:21,505 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/9f97a6a6a2d144d0b9e070fcf85a3d48 2024-12-10T03:34:21,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/3375b5d8216f4961a0fa481d7932297a is 50, key is test_row_0/B:col10/1733801661464/Put/seqid=0 2024-12-10T03:34:21,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742044_1220 (size=12301) 2024-12-10T03:34:21,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/3375b5d8216f4961a0fa481d7932297a 2024-12-10T03:34:21,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/56f10c7344914be785b6995c9973756e is 50, key is test_row_0/C:col10/1733801661464/Put/seqid=0 2024-12-10T03:34:21,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742045_1221 (size=12301) 2024-12-10T03:34:21,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/56f10c7344914be785b6995c9973756e 2024-12-10T03:34:21,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/9f97a6a6a2d144d0b9e070fcf85a3d48 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9f97a6a6a2d144d0b9e070fcf85a3d48 2024-12-10T03:34:21,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9f97a6a6a2d144d0b9e070fcf85a3d48, entries=150, sequenceid=262, filesize=30.5 K 2024-12-10T03:34:21,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/3375b5d8216f4961a0fa481d7932297a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3375b5d8216f4961a0fa481d7932297a 2024-12-10T03:34:21,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3375b5d8216f4961a0fa481d7932297a, entries=150, sequenceid=262, filesize=12.0 K 2024-12-10T03:34:21,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/56f10c7344914be785b6995c9973756e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/56f10c7344914be785b6995c9973756e 2024-12-10T03:34:21,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/56f10c7344914be785b6995c9973756e, entries=150, sequenceid=262, filesize=12.0 K 2024-12-10T03:34:21,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d58f0e0e176fadae27aa99803befaf76 in 81ms, sequenceid=262, compaction requested=true 2024-12-10T03:34:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:21,547 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:21,547 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:21,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,548 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:21,548 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93977 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:21,548 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/B is initiating minor compaction (all files) 2024-12-10T03:34:21,548 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/A is initiating minor compaction (all files) 2024-12-10T03:34:21,548 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/B in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:21,548 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/A in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:21,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,548 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f33c36a66e04a869823e0cebe972e05, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d337e6cefcb1420eb376b97f77525d89, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3375b5d8216f4961a0fa481d7932297a] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=36.2 K 2024-12-10T03:34:21,548 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/37535a4c92294f60bdc24278878116d4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1ddfd1e810dc4b10abc19d3fef698883, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9f97a6a6a2d144d0b9e070fcf85a3d48] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=91.8 K 2024-12-10T03:34:21,548 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false 
isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:21,548 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/37535a4c92294f60bdc24278878116d4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1ddfd1e810dc4b10abc19d3fef698883, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9f97a6a6a2d144d0b9e070fcf85a3d48] 2024-12-10T03:34:21,549 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f33c36a66e04a869823e0cebe972e05, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733801656100 2024-12-10T03:34:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,549 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37535a4c92294f60bdc24278878116d4, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733801656100 2024-12-10T03:34:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,549 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ddfd1e810dc4b10abc19d3fef698883, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733801658226 2024-12-10T03:34:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,549 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d337e6cefcb1420eb376b97f77525d89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733801658226 2024-12-10T03:34:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,550 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f97a6a6a2d144d0b9e070fcf85a3d48, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733801660355 2024-12-10T03:34:21,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,551 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 3375b5d8216f4961a0fa481d7932297a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733801660355 2024-12-10T03:34:21,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,558 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:21,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,562 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412103584c64da889428a87a0a61f0424d8eb_d58f0e0e176fadae27aa99803befaf76 store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,564 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#B#compaction#187 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:21,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,564 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d722168f19f04f72afd98e0af11e12bf is 50, key is test_row_0/B:col10/1733801661464/Put/seqid=0 2024-12-10T03:34:21,564 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412103584c64da889428a87a0a61f0424d8eb_d58f0e0e176fadae27aa99803befaf76, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:21,565 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103584c64da889428a87a0a61f0424d8eb_d58f0e0e176fadae27aa99803befaf76 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,567 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,570 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,574 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,577 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,581 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,586 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742046_1222 (size=12915) 2024-12-10T03:34:21,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742047_1223 (size=4469) 2024-12-10T03:34:21,593 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/d722168f19f04f72afd98e0af11e12bf as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d722168f19f04f72afd98e0af11e12bf 2024-12-10T03:34:21,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:21,594 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:34:21,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:21,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:21,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:21,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:21,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:21,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:21,600 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/B of d58f0e0e176fadae27aa99803befaf76 into d722168f19f04f72afd98e0af11e12bf(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:21,600 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:21,600 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/B, priority=13, startTime=1733801661547; duration=0sec 2024-12-10T03:34:21,600 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:21,600 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:B 2024-12-10T03:34:21,600 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:21,601 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:21,601 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/C is initiating minor compaction (all files) 2024-12-10T03:34:21,602 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/C in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:21,602 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ee1d3ef879af4b71becb56b444198dc7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/32f9047f2ca7441ea4d28fa133d10512, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/56f10c7344914be785b6995c9973756e] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=36.2 K 2024-12-10T03:34:21,602 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ee1d3ef879af4b71becb56b444198dc7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733801656100 2024-12-10T03:34:21,603 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 32f9047f2ca7441ea4d28fa133d10512, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733801658226 2024-12-10T03:34:21,603 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 56f10c7344914be785b6995c9973756e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733801660355 2024-12-10T03:34:21,618 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d58f0e0e176fadae27aa99803befaf76#C#compaction#188 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:21,619 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/417adb7c54b04ccc8bb8b7bbfb471a62 is 50, key is test_row_0/C:col10/1733801661464/Put/seqid=0 2024-12-10T03:34:21,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210886eb000351f4398b60adf7ce83cee4a_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801661470/Put/seqid=0 2024-12-10T03:34:21,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742048_1224 (size=12915) 2024-12-10T03:34:21,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742049_1225 (size=17534) 2024-12-10T03:34:21,637 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:21,642 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210886eb000351f4398b60adf7ce83cee4a_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210886eb000351f4398b60adf7ce83cee4a_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:21,644 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/c3a45e4516094b68a1b26178f425c447, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:21,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/c3a45e4516094b68a1b26178f425c447 is 175, key is test_row_0/A:col10/1733801661470/Put/seqid=0 2024-12-10T03:34:21,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801721645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742050_1226 (size=48639) 2024-12-10T03:34:21,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801721647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801721648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801721749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801721755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801721756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801721952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:21,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801721957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801721957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:21,989 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#A#compaction#186 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:21,989 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/6db6384a0219497db388e46e8ead2313 is 175, key is test_row_0/A:col10/1733801661464/Put/seqid=0 2024-12-10T03:34:21,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742051_1227 (size=31869) 2024-12-10T03:34:22,034 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/417adb7c54b04ccc8bb8b7bbfb471a62 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/417adb7c54b04ccc8bb8b7bbfb471a62 2024-12-10T03:34:22,038 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/C of d58f0e0e176fadae27aa99803befaf76 into 417adb7c54b04ccc8bb8b7bbfb471a62(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:22,038 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:22,038 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/C, priority=13, startTime=1733801661547; duration=0sec 2024-12-10T03:34:22,038 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:22,038 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:C 2024-12-10T03:34:22,054 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/c3a45e4516094b68a1b26178f425c447 2024-12-10T03:34:22,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/cece2dcf9e104ac9b344eb49e90cd6f4 is 50, key is test_row_0/B:col10/1733801661470/Put/seqid=0 2024-12-10T03:34:22,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742052_1228 (size=12301) 2024-12-10T03:34:22,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/cece2dcf9e104ac9b344eb49e90cd6f4 2024-12-10T03:34:22,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/da712079551441e3ae39b363a78b2a38 is 50, key is test_row_0/C:col10/1733801661470/Put/seqid=0 2024-12-10T03:34:22,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742053_1229 (size=12301) 2024-12-10T03:34:22,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801722254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801722259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801722260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,400 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/6db6384a0219497db388e46e8ead2313 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/6db6384a0219497db388e46e8ead2313 2024-12-10T03:34:22,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T03:34:22,402 INFO [Thread-729 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-10T03:34:22,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:22,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-10T03:34:22,404 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/A of d58f0e0e176fadae27aa99803befaf76 into 6db6384a0219497db388e46e8ead2313(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:22,404 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:22,404 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/A, priority=13, startTime=1733801661547; duration=0sec 2024-12-10T03:34:22,405 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:22,405 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:22,405 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:A 2024-12-10T03:34:22,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T03:34:22,405 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:22,405 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:22,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/da712079551441e3ae39b363a78b2a38 2024-12-10T03:34:22,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801722479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/c3a45e4516094b68a1b26178f425c447 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/c3a45e4516094b68a1b26178f425c447 2024-12-10T03:34:22,482 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801722481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/c3a45e4516094b68a1b26178f425c447, entries=250, sequenceid=278, filesize=47.5 K 2024-12-10T03:34:22,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/cece2dcf9e104ac9b344eb49e90cd6f4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/cece2dcf9e104ac9b344eb49e90cd6f4 2024-12-10T03:34:22,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/cece2dcf9e104ac9b344eb49e90cd6f4, entries=150, sequenceid=278, filesize=12.0 K 2024-12-10T03:34:22,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/da712079551441e3ae39b363a78b2a38 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/da712079551441e3ae39b363a78b2a38 2024-12-10T03:34:22,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/da712079551441e3ae39b363a78b2a38, entries=150, sequenceid=278, filesize=12.0 K 2024-12-10T03:34:22,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for d58f0e0e176fadae27aa99803befaf76 in 902ms, sequenceid=278, compaction requested=false 2024-12-10T03:34:22,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:22,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T03:34:22,557 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-10T03:34:22,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:22,558 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-10T03:34:22,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:22,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:22,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:22,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:22,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:22,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:22,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121042ada89f86e746ab96577671ff2db20d_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801661645/Put/seqid=0 2024-12-10T03:34:22,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742054_1230 (size=12454) 2024-12-10T03:34:22,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T03:34:22,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:22,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:22,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801722768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801722769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801722769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801722871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801722871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:22,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801722872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:22,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:22,973 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121042ada89f86e746ab96577671ff2db20d_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121042ada89f86e746ab96577671ff2db20d_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:22,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/08c762aea2fc415b84e5c5543a849ae4, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:22,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/08c762aea2fc415b84e5c5543a849ae4 is 175, key is test_row_0/A:col10/1733801661645/Put/seqid=0 2024-12-10T03:34:22,978 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742055_1231 (size=31255) 2024-12-10T03:34:23,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T03:34:23,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801723073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801723073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801723073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801723375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801723377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801723377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,379 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/08c762aea2fc415b84e5c5543a849ae4 2024-12-10T03:34:23,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/839e7b67590542ee9a3702cc5d7aae55 is 50, key is test_row_0/B:col10/1733801661645/Put/seqid=0 2024-12-10T03:34:23,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742056_1232 (size=12301) 2024-12-10T03:34:23,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T03:34:23,791 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/839e7b67590542ee9a3702cc5d7aae55 2024-12-10T03:34:23,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/579bf0700f184d27ab0645a543fb9566 is 50, key is test_row_0/C:col10/1733801661645/Put/seqid=0 2024-12-10T03:34:23,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742057_1233 (size=12301) 2024-12-10T03:34:23,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801723878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801723881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:23,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:23,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801723882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,209 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/579bf0700f184d27ab0645a543fb9566 2024-12-10T03:34:24,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/08c762aea2fc415b84e5c5543a849ae4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/08c762aea2fc415b84e5c5543a849ae4 2024-12-10T03:34:24,217 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/08c762aea2fc415b84e5c5543a849ae4, entries=150, sequenceid=301, filesize=30.5 K 2024-12-10T03:34:24,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/839e7b67590542ee9a3702cc5d7aae55 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/839e7b67590542ee9a3702cc5d7aae55 2024-12-10T03:34:24,222 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/839e7b67590542ee9a3702cc5d7aae55, entries=150, sequenceid=301, filesize=12.0 K 2024-12-10T03:34:24,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/579bf0700f184d27ab0645a543fb9566 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/579bf0700f184d27ab0645a543fb9566 2024-12-10T03:34:24,226 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/579bf0700f184d27ab0645a543fb9566, entries=150, sequenceid=301, filesize=12.0 K 2024-12-10T03:34:24,227 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for d58f0e0e176fadae27aa99803befaf76 in 1670ms, sequenceid=301, compaction requested=true 2024-12-10T03:34:24,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:24,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:24,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-10T03:34:24,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-10T03:34:24,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-10T03:34:24,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8230 sec 2024-12-10T03:34:24,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.8250 sec 2024-12-10T03:34:24,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:24,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-10T03:34:24,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:24,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:24,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:24,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:24,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:24,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:24,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f210a69c818f496c88690f5e9fab819e_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801662768/Put/seqid=0 2024-12-10T03:34:24,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742058_1234 (size=12454) 2024-12-10T03:34:24,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T03:34:24,509 INFO [Thread-729 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-10T03:34:24,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-10T03:34:24,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T03:34:24,511 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:24,511 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:24,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:24,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801724531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801724535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T03:34:24,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801724636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801724637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,662 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:24,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:24,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:24,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:24,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:24,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:24,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T03:34:24,815 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:24,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:24,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:24,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:24,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:24,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:24,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801724838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801724838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60904 deadline: 1733801724883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60886 deadline: 1733801724886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:24,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60900 deadline: 1733801724886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,901 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:24,905 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f210a69c818f496c88690f5e9fab819e_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f210a69c818f496c88690f5e9fab819e_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:24,906 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bb25b88500cc4d06991cc3190495deb2, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:24,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bb25b88500cc4d06991cc3190495deb2 is 175, key is test_row_0/A:col10/1733801662768/Put/seqid=0 2024-12-10T03:34:24,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742059_1235 (size=31255) 2024-12-10T03:34:24,967 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:24,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:24,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:24,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
as already flushing 2024-12-10T03:34:24,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:24,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:24,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:24,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T03:34:25,120 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:25,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:25,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:25,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801725141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:25,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801725145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,272 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:25,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:25,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:25,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:25,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:25,321 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bb25b88500cc4d06991cc3190495deb2 2024-12-10T03:34:25,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/ca755e5b2ac14e6594e1633fb4ada1e2 is 50, key is test_row_0/B:col10/1733801662768/Put/seqid=0 2024-12-10T03:34:25,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742060_1236 (size=12301) 2024-12-10T03:34:25,425 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:25,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:25,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:25,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,577 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:25,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:25,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T03:34:25,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:25,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60950 deadline: 1733801725646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60944 deadline: 1733801725651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:25,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:25,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:25,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/ca755e5b2ac14e6594e1633fb4ada1e2 2024-12-10T03:34:25,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/ec02fce54340495ca2eca6c5b21f952c is 50, key is test_row_0/C:col10/1733801662768/Put/seqid=0 2024-12-10T03:34:25,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742061_1237 (size=12301) 2024-12-10T03:34:25,863 DEBUG [Thread-730 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:51621 2024-12-10T03:34:25,863 DEBUG [Thread-732 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:51621 2024-12-10T03:34:25,864 DEBUG [Thread-732 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:25,864 DEBUG [Thread-730 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:25,867 DEBUG [Thread-734 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7af61386 to 127.0.0.1:51621 2024-12-10T03:34:25,867 DEBUG [Thread-734 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:25,868 DEBUG [Thread-736 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x063e87c8 to 127.0.0.1:51621 2024-12-10T03:34:25,868 DEBUG [Thread-736 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:25,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:25,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:25,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:25,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:25,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
2024-12-10T03:34:25,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:26,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:26,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:26,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:26,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. as already flushing 2024-12-10T03:34:26,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:26,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:26,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:26,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:26,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/ec02fce54340495ca2eca6c5b21f952c 2024-12-10T03:34:26,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bb25b88500cc4d06991cc3190495deb2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bb25b88500cc4d06991cc3190495deb2 2024-12-10T03:34:26,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bb25b88500cc4d06991cc3190495deb2, entries=150, sequenceid=318, filesize=30.5 K 2024-12-10T03:34:26,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/ca755e5b2ac14e6594e1633fb4ada1e2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/ca755e5b2ac14e6594e1633fb4ada1e2 2024-12-10T03:34:26,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/ca755e5b2ac14e6594e1633fb4ada1e2, entries=150, 
sequenceid=318, filesize=12.0 K 2024-12-10T03:34:26,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/ec02fce54340495ca2eca6c5b21f952c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ec02fce54340495ca2eca6c5b21f952c 2024-12-10T03:34:26,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ec02fce54340495ca2eca6c5b21f952c, entries=150, sequenceid=318, filesize=12.0 K 2024-12-10T03:34:26,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for d58f0e0e176fadae27aa99803befaf76 in 1696ms, sequenceid=318, compaction requested=true 2024-12-10T03:34:26,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:26,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:26,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:26,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:26,183 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:26,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:26,183 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:26,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d58f0e0e176fadae27aa99803befaf76:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:26,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:26,185 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:26,185 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143018 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:26,185 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] 
regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/B is initiating minor compaction (all files) 2024-12-10T03:34:26,185 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/A is initiating minor compaction (all files) 2024-12-10T03:34:26,185 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/B in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:26,185 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/A in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:26,185 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d722168f19f04f72afd98e0af11e12bf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/cece2dcf9e104ac9b344eb49e90cd6f4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/839e7b67590542ee9a3702cc5d7aae55, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/ca755e5b2ac14e6594e1633fb4ada1e2] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=48.7 K 2024-12-10T03:34:26,185 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/6db6384a0219497db388e46e8ead2313, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/c3a45e4516094b68a1b26178f425c447, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/08c762aea2fc415b84e5c5543a849ae4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bb25b88500cc4d06991cc3190495deb2] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=139.7 K 2024-12-10T03:34:26,185 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:26,186 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/6db6384a0219497db388e46e8ead2313, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/c3a45e4516094b68a1b26178f425c447, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/08c762aea2fc415b84e5c5543a849ae4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bb25b88500cc4d06991cc3190495deb2] 2024-12-10T03:34:26,186 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d722168f19f04f72afd98e0af11e12bf, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733801660355 2024-12-10T03:34:26,186 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6db6384a0219497db388e46e8ead2313, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733801660355 2024-12-10T03:34:26,187 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting cece2dcf9e104ac9b344eb49e90cd6f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801661470 2024-12-10T03:34:26,187 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3a45e4516094b68a1b26178f425c447, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801661470 2024-12-10T03:34:26,187 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 839e7b67590542ee9a3702cc5d7aae55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733801661643 2024-12-10T03:34:26,188 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08c762aea2fc415b84e5c5543a849ae4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733801661643 2024-12-10T03:34:26,188 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ca755e5b2ac14e6594e1633fb4ada1e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733801662768 2024-12-10T03:34:26,188 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb25b88500cc4d06991cc3190495deb2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733801662768 2024-12-10T03:34:26,192 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:26,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-10T03:34:26,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
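[Editor's note] The pid=58 failures above ("Unable to complete flush ... as already flushing") are the master re-dispatching its flush procedure to the region server while a MemStoreFlusher-initiated flush of the same region is still in progress; once that flush finishes, the retried callable succeeds in the records that follow. The whole chain is driven by one client-side flush request (see the "Operation: FLUSH ... procId: 57 completed" record later in this log). For reference only, the sketch below shows how such a request is typically issued through the public Admin API; the table name is taken from the log, while the class name and setup are illustrative, not the test tool's actual code.

```java
// Minimal sketch (not the test's code) of the client call that drives the
// FlushTableProcedure / FlushRegionProcedure pair seen in this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // From the client's point of view this returns once the master-side
      // procedure completes ("procId: 57 completed" in the log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```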
2024-12-10T03:34:26,193 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T03:34:26,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:26,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:26,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:26,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:26,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:26,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:26,203 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#B#compaction#198 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:26,203 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/3e86a415b6f04a709046c677695c49de is 50, key is test_row_0/B:col10/1733801662768/Put/seqid=0 2024-12-10T03:34:26,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210eab249fb460c445095ffa7618cfeea20_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_0/A:col10/1733801664526/Put/seqid=0 2024-12-10T03:34:26,207 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:26,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742062_1238 (size=13051) 2024-12-10T03:34:26,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210cde93898094b42318bdb840907bb9677_d58f0e0e176fadae27aa99803befaf76 store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:26,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742063_1239 (size=12454) 2024-12-10T03:34:26,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:26,227 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210eab249fb460c445095ffa7618cfeea20_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210eab249fb460c445095ffa7618cfeea20_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:26,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/615fd693e1584a56873bf18a987becc1, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:26,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/615fd693e1584a56873bf18a987becc1 is 175, key is test_row_0/A:col10/1733801664526/Put/seqid=0 2024-12-10T03:34:26,233 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210cde93898094b42318bdb840907bb9677_d58f0e0e176fadae27aa99803befaf76, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:26,233 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210cde93898094b42318bdb840907bb9677_d58f0e0e176fadae27aa99803befaf76 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:26,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742064_1240 (size=31255) 2024-12-10T03:34:26,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742065_1241 (size=4469) 2024-12-10T03:34:26,241 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#A#compaction#200 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:26,242 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/21e52ba2f3c54f9cbb4dd21c30ec3d20 is 175, key is test_row_0/A:col10/1733801662768/Put/seqid=0 2024-12-10T03:34:26,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742066_1242 (size=32005) 2024-12-10T03:34:26,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T03:34:26,618 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/3e86a415b6f04a709046c677695c49de as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3e86a415b6f04a709046c677695c49de 2024-12-10T03:34:26,622 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/B of d58f0e0e176fadae27aa99803befaf76 into 3e86a415b6f04a709046c677695c49de(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
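[Editor's note] The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" records above refer to HBase's ratio-based store-file selection: a window of adjacent HFiles is acceptable when no single file is much larger than the combined size of the others. The snippet below is a deliberately simplified illustration of that ratio test only; it is not HBase's ExploringCompactionPolicy implementation, the 1.2 ratio is the documented default of hbase.hstore.compaction.ratio, and the sizes are rounded from the B-store selection in this log.

```java
// Simplified illustration of the "in ratio" check, NOT HBase's actual policy code.
import java.util.List;

public class RatioCheckSketch {
  static boolean inRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // Each candidate file must be no bigger than ratio * (sum of the other files).
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the B-store selection above: one 12.6 K file plus three 12.0 K files.
    List<Long> sizes = List.of(12_900L, 12_300L, 12_300L, 12_300L);
    System.out.println(inRatio(sizes, 1.2)); // true -> all four files compacted together
  }
}
```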
2024-12-10T03:34:26,622 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:26,622 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/B, priority=12, startTime=1733801666183; duration=0sec 2024-12-10T03:34:26,622 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:26,622 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:B 2024-12-10T03:34:26,622 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:26,624 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:26,624 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): d58f0e0e176fadae27aa99803befaf76/C is initiating minor compaction (all files) 2024-12-10T03:34:26,624 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d58f0e0e176fadae27aa99803befaf76/C in TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:26,624 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/417adb7c54b04ccc8bb8b7bbfb471a62, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/da712079551441e3ae39b363a78b2a38, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/579bf0700f184d27ab0645a543fb9566, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ec02fce54340495ca2eca6c5b21f952c] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp, totalSize=48.7 K 2024-12-10T03:34:26,624 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 417adb7c54b04ccc8bb8b7bbfb471a62, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733801660355 2024-12-10T03:34:26,624 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting da712079551441e3ae39b363a78b2a38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801661470 2024-12-10T03:34:26,625 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 579bf0700f184d27ab0645a543fb9566, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=301, earliestPutTs=1733801661643 2024-12-10T03:34:26,625 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ec02fce54340495ca2eca6c5b21f952c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733801662768 2024-12-10T03:34:26,633 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d58f0e0e176fadae27aa99803befaf76#C#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:26,634 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/2409f14bb0dd41fa836fb1aa5130ae28 is 50, key is test_row_0/C:col10/1733801662768/Put/seqid=0 2024-12-10T03:34:26,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742067_1243 (size=13051) 2024-12-10T03:34:26,641 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=337, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/615fd693e1584a56873bf18a987becc1 2024-12-10T03:34:26,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/fe184d7d3c7146c18a4dba1375bb0651 is 50, key is test_row_0/B:col10/1733801664526/Put/seqid=0 2024-12-10T03:34:26,651 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/21e52ba2f3c54f9cbb4dd21c30ec3d20 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/21e52ba2f3c54f9cbb4dd21c30ec3d20 2024-12-10T03:34:26,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742068_1244 (size=12301) 2024-12-10T03:34:26,652 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/fe184d7d3c7146c18a4dba1375bb0651 2024-12-10T03:34:26,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:26,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
as already flushing 2024-12-10T03:34:26,653 DEBUG [Thread-725 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:51621 2024-12-10T03:34:26,653 DEBUG [Thread-725 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:26,655 DEBUG [Thread-727 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78cafade to 127.0.0.1:51621 2024-12-10T03:34:26,656 DEBUG [Thread-727 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:26,656 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/A of d58f0e0e176fadae27aa99803befaf76 into 21e52ba2f3c54f9cbb4dd21c30ec3d20(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:26,656 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:26,656 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/A, priority=12, startTime=1733801666183; duration=0sec 2024-12-10T03:34:26,657 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:26,657 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:A 2024-12-10T03:34:26,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/0d3340584a324f758c6253e82e094668 is 50, key is test_row_0/C:col10/1733801664526/Put/seqid=0 2024-12-10T03:34:26,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742069_1245 (size=12301) 2024-12-10T03:34:26,892 DEBUG [Thread-723 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x505d5ccd to 127.0.0.1:51621 2024-12-10T03:34:26,892 DEBUG [Thread-723 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:26,893 DEBUG [Thread-719 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b4bd1ba to 127.0.0.1:51621 2024-12-10T03:34:26,893 DEBUG [Thread-719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:26,901 DEBUG [Thread-721 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebda6ad to 127.0.0.1:51621 2024-12-10T03:34:26,901 DEBUG [Thread-721 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:27,049 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/2409f14bb0dd41fa836fb1aa5130ae28 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/2409f14bb0dd41fa836fb1aa5130ae28 2024-12-10T03:34:27,055 
INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d58f0e0e176fadae27aa99803befaf76/C of d58f0e0e176fadae27aa99803befaf76 into 2409f14bb0dd41fa836fb1aa5130ae28(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:27,056 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:27,056 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76., storeName=d58f0e0e176fadae27aa99803befaf76/C, priority=12, startTime=1733801666183; duration=0sec 2024-12-10T03:34:27,056 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:27,056 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d58f0e0e176fadae27aa99803befaf76:C 2024-12-10T03:34:27,063 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/0d3340584a324f758c6253e82e094668 2024-12-10T03:34:27,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/615fd693e1584a56873bf18a987becc1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/615fd693e1584a56873bf18a987becc1 2024-12-10T03:34:27,072 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/615fd693e1584a56873bf18a987becc1, entries=150, sequenceid=337, filesize=30.5 K 2024-12-10T03:34:27,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/fe184d7d3c7146c18a4dba1375bb0651 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fe184d7d3c7146c18a4dba1375bb0651 2024-12-10T03:34:27,077 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fe184d7d3c7146c18a4dba1375bb0651, entries=150, sequenceid=337, filesize=12.0 K 
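[Editor's note] The repeated "Committing .../.tmp/... as ..." records show the commit step of a flush or compaction under the DefaultStoreFileTracker noted earlier: the new HFile is first written into the region's .tmp directory and only then moved into the column-family directory, so scanners never observe a partially written file. Below is a hedged sketch of that move using Hadoop's FileSystem API; the paths are placeholders, not the exact files from this run.

```java
// Sketch of the commit-by-rename step behind the "Committing ....tmp/... as ..." records.
// Paths are placeholders; this is an illustration, not HBase's HRegionFileSystem code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmpFile   = new Path("/hbase/data/default/TestAcidGuarantees/REGION/.tmp/B/HFILE");
    Path storeFile = new Path("/hbase/data/default/TestAcidGuarantees/REGION/B/HFILE");

    // The rename is what makes the new file visible to the store; until it
    // happens, readers only see the previously committed store files.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}
```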
2024-12-10T03:34:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/0d3340584a324f758c6253e82e094668 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/0d3340584a324f758c6253e82e094668 2024-12-10T03:34:27,082 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/0d3340584a324f758c6253e82e094668, entries=150, sequenceid=337, filesize=12.0 K 2024-12-10T03:34:27,083 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=33.54 KB/34350 for d58f0e0e176fadae27aa99803befaf76 in 890ms, sequenceid=337, compaction requested=false 2024-12-10T03:34:27,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for d58f0e0e176fadae27aa99803befaf76: 2024-12-10T03:34:27,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:27,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-10T03:34:27,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-10T03:34:27,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-10T03:34:27,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5730 sec 2024-12-10T03:34:27,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.5750 sec 2024-12-10T03:34:28,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-10T03:34:28,617 INFO [Thread-729 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9247
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9068
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3940
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11820 rows
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3938
2024-12-10T03:34:28,618 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 11814 rows
2024-12-10T03:34:28,619 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-10T03:34:28,619 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x407e6b5c to 127.0.0.1:51621
2024-12-10T03:34:28,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T03:34:28,625 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-10T03:34:28,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-10T03:34:28,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-10T03:34:28,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-12-10T03:34:28,629 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801668628"}]},"ts":"1733801668628"}
2024-12-10T03:34:28,629 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-10T03:34:28,668 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-10T03:34:28,669 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-10T03:34:28,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, UNASSIGN}]
2024-12-10T03:34:28,672 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure
table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, UNASSIGN 2024-12-10T03:34:28,674 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:28,675 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:34:28,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:28,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T03:34:28,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:28,830 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:28,830 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:34:28,830 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing d58f0e0e176fadae27aa99803befaf76, disabling compactions & flushes 2024-12-10T03:34:28,830 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:28,831 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 2024-12-10T03:34:28,831 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. after waiting 0 ms 2024-12-10T03:34:28,831 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
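[Editor's note] The DisableTableProcedure chain above (pid=59 -> 60 -> 61 -> 62) closes the region as part of the test's teardown, triggered by the client-side "Started disable of TestAcidGuarantees" call. A minimal sketch of that client call is shown below; the connection setup is assumed and error handling is omitted, so this is illustrative rather than the test harness's actual code.

```java
// Minimal sketch of the client call behind the DisableTableProcedure chain above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Blocks until the table reaches DISABLED, i.e. until each of its regions
        // (here only d58f0e0e176fadae27aa99803befaf76) has been flushed and closed.
        admin.disableTable(table);
      }
    }
  }
}
```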
2024-12-10T03:34:28,831 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing d58f0e0e176fadae27aa99803befaf76 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T03:34:28,832 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=A 2024-12-10T03:34:28,832 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:28,832 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=B 2024-12-10T03:34:28,832 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:28,832 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d58f0e0e176fadae27aa99803befaf76, store=C 2024-12-10T03:34:28,833 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:28,843 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121071a58d39f57e4c66a769a377b8d8b83d_d58f0e0e176fadae27aa99803befaf76 is 50, key is test_row_1/A:col10/1733801666900/Put/seqid=0 2024-12-10T03:34:28,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742070_1246 (size=9914) 2024-12-10T03:34:28,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T03:34:29,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T03:34:29,248 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:29,256 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121071a58d39f57e4c66a769a377b8d8b83d_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121071a58d39f57e4c66a769a377b8d8b83d_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:29,257 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bcfa8a614a614ed9800f17b00f62e327, store: [table=TestAcidGuarantees family=A region=d58f0e0e176fadae27aa99803befaf76] 2024-12-10T03:34:29,258 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bcfa8a614a614ed9800f17b00f62e327 is 175, key is test_row_1/A:col10/1733801666900/Put/seqid=0 2024-12-10T03:34:29,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742071_1247 (size=22561) 2024-12-10T03:34:29,372 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T03:34:29,663 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=348, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bcfa8a614a614ed9800f17b00f62e327 2024-12-10T03:34:29,670 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/bfac88915a4d494c86eef8a1e480f54b is 50, key is test_row_1/B:col10/1733801666900/Put/seqid=0 2024-12-10T03:34:29,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742072_1248 (size=9857) 2024-12-10T03:34:29,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T03:34:30,076 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/bfac88915a4d494c86eef8a1e480f54b 2024-12-10T03:34:30,088 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/a7592ebcdefb429a97b81435eb3f93be is 50, key is test_row_1/C:col10/1733801666900/Put/seqid=0 2024-12-10T03:34:30,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742073_1249 (size=9857) 2024-12-10T03:34:30,495 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=348 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/a7592ebcdefb429a97b81435eb3f93be 2024-12-10T03:34:30,504 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/A/bcfa8a614a614ed9800f17b00f62e327 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bcfa8a614a614ed9800f17b00f62e327 2024-12-10T03:34:30,512 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bcfa8a614a614ed9800f17b00f62e327, entries=100, sequenceid=348, filesize=22.0 K 2024-12-10T03:34:30,513 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/B/bfac88915a4d494c86eef8a1e480f54b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/bfac88915a4d494c86eef8a1e480f54b 2024-12-10T03:34:30,519 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/bfac88915a4d494c86eef8a1e480f54b, entries=100, sequenceid=348, filesize=9.6 K 2024-12-10T03:34:30,520 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/.tmp/C/a7592ebcdefb429a97b81435eb3f93be as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/a7592ebcdefb429a97b81435eb3f93be 2024-12-10T03:34:30,526 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/a7592ebcdefb429a97b81435eb3f93be, entries=100, sequenceid=348, filesize=9.6 K 2024-12-10T03:34:30,527 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for d58f0e0e176fadae27aa99803befaf76 in 1696ms, sequenceid=348, compaction requested=true 2024-12-10T03:34:30,528 DEBUG [StoreCloser-TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/22e81ff7afd84e82928262693d9ec126, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/8103c729a4424b478e8c4aec871bb11c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/e68b7f4de9154691b085807e7f5fc16b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/42fb424984134adc9d8a49a2d2069c19, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bccb73f10040413397f8de8021a9623a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b7a87f0021f241c8aada33ee667ae252, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/a1e10a1db1bb49d78c058be261c8f21d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/30fda6c2b21c4c508e654606838f68c4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/7b8d31d8ff2c4665b53666047fd8a551, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/61769daff09043edaae5350d7eb69f3a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/3b9a6f769c514f4fb0b98f36336b118d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9086b712be114724ac2f7bdf02280372, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b629cdf790e34b34924b0e8649c3318b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b303d00f40884aec8e8fda5364c3b76b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/37535a4c92294f60bdc24278878116d4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1b87e757d47942b4bf135e38e7c8886b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1ddfd1e810dc4b10abc19d3fef698883, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/6db6384a0219497db388e46e8ead2313, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9f97a6a6a2d144d0b9e070fcf85a3d48, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/c3a45e4516094b68a1b26178f425c447, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/08c762aea2fc415b84e5c5543a849ae4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bb25b88500cc4d06991cc3190495deb2] to archive 2024-12-10T03:34:30,529 DEBUG [StoreCloser-TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:34:30,532 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b7a87f0021f241c8aada33ee667ae252 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b7a87f0021f241c8aada33ee667ae252 2024-12-10T03:34:30,532 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/8103c729a4424b478e8c4aec871bb11c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/8103c729a4424b478e8c4aec871bb11c 2024-12-10T03:34:30,532 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/22e81ff7afd84e82928262693d9ec126 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/22e81ff7afd84e82928262693d9ec126 2024-12-10T03:34:30,532 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bccb73f10040413397f8de8021a9623a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bccb73f10040413397f8de8021a9623a 2024-12-10T03:34:30,532 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/e68b7f4de9154691b085807e7f5fc16b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/e68b7f4de9154691b085807e7f5fc16b 2024-12-10T03:34:30,532 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/30fda6c2b21c4c508e654606838f68c4 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/30fda6c2b21c4c508e654606838f68c4 2024-12-10T03:34:30,533 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/42fb424984134adc9d8a49a2d2069c19 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/42fb424984134adc9d8a49a2d2069c19 2024-12-10T03:34:30,533 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/a1e10a1db1bb49d78c058be261c8f21d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/a1e10a1db1bb49d78c058be261c8f21d 2024-12-10T03:34:30,534 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9086b712be114724ac2f7bdf02280372 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9086b712be114724ac2f7bdf02280372 2024-12-10T03:34:30,534 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b629cdf790e34b34924b0e8649c3318b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b629cdf790e34b34924b0e8649c3318b 2024-12-10T03:34:30,534 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/37535a4c92294f60bdc24278878116d4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/37535a4c92294f60bdc24278878116d4 2024-12-10T03:34:30,534 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/3b9a6f769c514f4fb0b98f36336b118d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/3b9a6f769c514f4fb0b98f36336b118d 2024-12-10T03:34:30,534 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/61769daff09043edaae5350d7eb69f3a to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/61769daff09043edaae5350d7eb69f3a 2024-12-10T03:34:30,534 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/7b8d31d8ff2c4665b53666047fd8a551 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/7b8d31d8ff2c4665b53666047fd8a551 2024-12-10T03:34:30,535 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b303d00f40884aec8e8fda5364c3b76b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/b303d00f40884aec8e8fda5364c3b76b 2024-12-10T03:34:30,535 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1b87e757d47942b4bf135e38e7c8886b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1b87e757d47942b4bf135e38e7c8886b 2024-12-10T03:34:30,536 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/6db6384a0219497db388e46e8ead2313 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/6db6384a0219497db388e46e8ead2313 2024-12-10T03:34:30,536 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9f97a6a6a2d144d0b9e070fcf85a3d48 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/9f97a6a6a2d144d0b9e070fcf85a3d48 2024-12-10T03:34:30,536 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/08c762aea2fc415b84e5c5543a849ae4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/08c762aea2fc415b84e5c5543a849ae4 2024-12-10T03:34:30,536 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bb25b88500cc4d06991cc3190495deb2 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bb25b88500cc4d06991cc3190495deb2 2024-12-10T03:34:30,536 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/c3a45e4516094b68a1b26178f425c447 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/c3a45e4516094b68a1b26178f425c447 2024-12-10T03:34:30,536 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1ddfd1e810dc4b10abc19d3fef698883 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/1ddfd1e810dc4b10abc19d3fef698883 2024-12-10T03:34:30,537 DEBUG [StoreCloser-TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/68c53adfa9494dafb33a3adf1dabbd7a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/b22fa2cf6127486d8e3ae14a277143a6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/44f82d9b0a8a48abbdb7d95ac205a2ec, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/95300adea00643129f8383f91d736bed, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/8e160474cf104fb997a02ae83e1cbb2e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/f069e0ca24f94456a8e8180ec67f07c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/2c9e6b7475c042f5bd7dcc46fdf481c8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/88d035d68b7a4156ab3847b3d901685e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/65d703d99eaf452a991cc7b5c0f3869a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1aa8bf3b0b064781a82e6922238af60f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/30648c6aa93947dc8a2e6bc15043530d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/c3bb3adad12347f89031acc5117d36a9, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f2d69ab635d47baac2dad551139896e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d2f2af5a3f574f4faa482d679c06aebf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f33c36a66e04a869823e0cebe972e05, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fba5b3c8768e4bf3bc08ff05190d9b0d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d337e6cefcb1420eb376b97f77525d89, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d722168f19f04f72afd98e0af11e12bf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3375b5d8216f4961a0fa481d7932297a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/cece2dcf9e104ac9b344eb49e90cd6f4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/839e7b67590542ee9a3702cc5d7aae55, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/ca755e5b2ac14e6594e1633fb4ada1e2] to archive 2024-12-10T03:34:30,538 DEBUG [StoreCloser-TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
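
Every archiving entry above follows the same visible path translation: a compacted store file under .../data/default/TestAcidGuarantees/<region>/<family>/ is moved to the identical relative location under .../archive/data/default/.... The short Java sketch below reproduces that mapping from the paths alone; it is only an illustration of the pattern shown in this log, not HBase's HFileArchiver code, and the toArchivePath helper is an invented name.

    import java.net.URI;
    import java.net.URISyntaxException;

    /**
     * Illustrative only: derives the archive location for a store file by
     * re-rooting the "/data/..." part of its path under "/archive/data/...",
     * mirroring the source -> destination pairs printed by HFileArchiver above.
     */
    public class ArchivePathSketch {

        // Hypothetical helper; not part of the HBase API.
        static URI toArchivePath(URI storeFile) throws URISyntaxException {
            // e.g. /user/jenkins/test-data/<id>/data/default/TestAcidGuarantees/<region>/A/<hfile>
            String path = storeFile.getPath();
            int idx = path.indexOf("/data/");
            if (idx < 0) {
                throw new IllegalArgumentException("not under a /data/ root: " + path);
            }
            // Keep everything up to the HBase root dir, then insert "archive" before "data".
            String archived = path.substring(0, idx) + "/archive" + path.substring(idx);
            return new URI(storeFile.getScheme(), storeFile.getAuthority(), archived, null, null);
        }

        public static void main(String[] args) throws URISyntaxException {
            URI src = new URI("hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a"
                    + "/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/ca755e5b2ac14e6594e1633fb4ada1e2");
            // Prints the archive destination for this B-family file.
            System.out.println(toArchivePath(src));
        }
    }

Running it on one of the B-family files above prints the same destination the log reports for that file.
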
2024-12-10T03:34:30,540 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/b22fa2cf6127486d8e3ae14a277143a6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/b22fa2cf6127486d8e3ae14a277143a6 2024-12-10T03:34:30,540 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/68c53adfa9494dafb33a3adf1dabbd7a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/68c53adfa9494dafb33a3adf1dabbd7a 2024-12-10T03:34:30,540 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/44f82d9b0a8a48abbdb7d95ac205a2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/44f82d9b0a8a48abbdb7d95ac205a2ec 2024-12-10T03:34:30,540 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/8e160474cf104fb997a02ae83e1cbb2e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/8e160474cf104fb997a02ae83e1cbb2e 2024-12-10T03:34:30,540 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/f069e0ca24f94456a8e8180ec67f07c5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/f069e0ca24f94456a8e8180ec67f07c5 2024-12-10T03:34:30,540 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/88d035d68b7a4156ab3847b3d901685e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/88d035d68b7a4156ab3847b3d901685e 2024-12-10T03:34:30,540 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/95300adea00643129f8383f91d736bed to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/95300adea00643129f8383f91d736bed 2024-12-10T03:34:30,541 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/2c9e6b7475c042f5bd7dcc46fdf481c8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/2c9e6b7475c042f5bd7dcc46fdf481c8 2024-12-10T03:34:30,542 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/65d703d99eaf452a991cc7b5c0f3869a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/65d703d99eaf452a991cc7b5c0f3869a 2024-12-10T03:34:30,542 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1aa8bf3b0b064781a82e6922238af60f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1aa8bf3b0b064781a82e6922238af60f 2024-12-10T03:34:30,542 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/30648c6aa93947dc8a2e6bc15043530d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/30648c6aa93947dc8a2e6bc15043530d 2024-12-10T03:34:30,542 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/c3bb3adad12347f89031acc5117d36a9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/c3bb3adad12347f89031acc5117d36a9 2024-12-10T03:34:30,543 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f2d69ab635d47baac2dad551139896e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f2d69ab635d47baac2dad551139896e 2024-12-10T03:34:30,543 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d2f2af5a3f574f4faa482d679c06aebf to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d2f2af5a3f574f4faa482d679c06aebf 2024-12-10T03:34:30,543 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f33c36a66e04a869823e0cebe972e05 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/1f33c36a66e04a869823e0cebe972e05 2024-12-10T03:34:30,543 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fba5b3c8768e4bf3bc08ff05190d9b0d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fba5b3c8768e4bf3bc08ff05190d9b0d 2024-12-10T03:34:30,544 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d722168f19f04f72afd98e0af11e12bf to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d722168f19f04f72afd98e0af11e12bf 2024-12-10T03:34:30,544 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d337e6cefcb1420eb376b97f77525d89 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/d337e6cefcb1420eb376b97f77525d89 2024-12-10T03:34:30,544 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3375b5d8216f4961a0fa481d7932297a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3375b5d8216f4961a0fa481d7932297a 2024-12-10T03:34:30,544 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/cece2dcf9e104ac9b344eb49e90cd6f4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/cece2dcf9e104ac9b344eb49e90cd6f4 2024-12-10T03:34:30,544 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/839e7b67590542ee9a3702cc5d7aae55 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/839e7b67590542ee9a3702cc5d7aae55 2024-12-10T03:34:30,544 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/ca755e5b2ac14e6594e1633fb4ada1e2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/ca755e5b2ac14e6594e1633fb4ada1e2 2024-12-10T03:34:30,545 DEBUG [StoreCloser-TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/aad636305f5b4e988d532e5476ab77d3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/176a0ad48a9247d2a9a7b242c766169d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/7742142f4ae540b5a270b7a7941344e6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4e0fec1a08a642aeb6ee899562606fad, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/fcb2bd33594a458cb755e3eb30b81ab1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/00651e294a374c6d91b1f3e5e4e195f7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/65511ae70142469ca391c637009b42e9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/406b5f4fe7974a529b204d3ae619e9f7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/23a632373ca5467eb7721451246755b9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/f8e831e3d3264757addda0ee6120dac8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/3caa96415a1a414682e590e436d24a36, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ab9b8856e1104fc6acfe5b48ffd56f1b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4048f7dd3e8c4b83bbd74a3b9e9b567f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/69342da4f9b4404bbb0d44337407f024, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ee1d3ef879af4b71becb56b444198dc7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/98cf934235984572876d95d9d0944310, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/32f9047f2ca7441ea4d28fa133d10512, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/417adb7c54b04ccc8bb8b7bbfb471a62, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/56f10c7344914be785b6995c9973756e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/da712079551441e3ae39b363a78b2a38, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/579bf0700f184d27ab0645a543fb9566, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ec02fce54340495ca2eca6c5b21f952c] to archive 2024-12-10T03:34:30,546 DEBUG [StoreCloser-TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:34:30,547 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/aad636305f5b4e988d532e5476ab77d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/aad636305f5b4e988d532e5476ab77d3 2024-12-10T03:34:30,548 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4e0fec1a08a642aeb6ee899562606fad to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4e0fec1a08a642aeb6ee899562606fad 2024-12-10T03:34:30,548 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/7742142f4ae540b5a270b7a7941344e6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/7742142f4ae540b5a270b7a7941344e6 2024-12-10T03:34:30,548 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/00651e294a374c6d91b1f3e5e4e195f7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/00651e294a374c6d91b1f3e5e4e195f7 2024-12-10T03:34:30,548 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/406b5f4fe7974a529b204d3ae619e9f7 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/406b5f4fe7974a529b204d3ae619e9f7 2024-12-10T03:34:30,548 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/176a0ad48a9247d2a9a7b242c766169d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/176a0ad48a9247d2a9a7b242c766169d 2024-12-10T03:34:30,548 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/65511ae70142469ca391c637009b42e9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/65511ae70142469ca391c637009b42e9 2024-12-10T03:34:30,548 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/fcb2bd33594a458cb755e3eb30b81ab1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/fcb2bd33594a458cb755e3eb30b81ab1 2024-12-10T03:34:30,549 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/3caa96415a1a414682e590e436d24a36 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/3caa96415a1a414682e590e436d24a36 2024-12-10T03:34:30,549 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/f8e831e3d3264757addda0ee6120dac8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/f8e831e3d3264757addda0ee6120dac8 2024-12-10T03:34:30,550 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/23a632373ca5467eb7721451246755b9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/23a632373ca5467eb7721451246755b9 2024-12-10T03:34:30,550 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4048f7dd3e8c4b83bbd74a3b9e9b567f to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/4048f7dd3e8c4b83bbd74a3b9e9b567f 2024-12-10T03:34:30,550 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ee1d3ef879af4b71becb56b444198dc7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ee1d3ef879af4b71becb56b444198dc7 2024-12-10T03:34:30,550 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ab9b8856e1104fc6acfe5b48ffd56f1b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ab9b8856e1104fc6acfe5b48ffd56f1b 2024-12-10T03:34:30,550 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/69342da4f9b4404bbb0d44337407f024 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/69342da4f9b4404bbb0d44337407f024 2024-12-10T03:34:30,550 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/98cf934235984572876d95d9d0944310 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/98cf934235984572876d95d9d0944310 2024-12-10T03:34:30,551 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/417adb7c54b04ccc8bb8b7bbfb471a62 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/417adb7c54b04ccc8bb8b7bbfb471a62 2024-12-10T03:34:30,551 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/56f10c7344914be785b6995c9973756e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/56f10c7344914be785b6995c9973756e 2024-12-10T03:34:30,551 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/32f9047f2ca7441ea4d28fa133d10512 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/32f9047f2ca7441ea4d28fa133d10512 2024-12-10T03:34:30,552 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/da712079551441e3ae39b363a78b2a38 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/da712079551441e3ae39b363a78b2a38 2024-12-10T03:34:30,552 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/579bf0700f184d27ab0645a543fb9566 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/579bf0700f184d27ab0645a543fb9566 2024-12-10T03:34:30,552 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ec02fce54340495ca2eca6c5b21f952c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/ec02fce54340495ca2eca6c5b21f952c 2024-12-10T03:34:30,556 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/recovered.edits/351.seqid, newMaxSeqId=351, maxSeqId=4 2024-12-10T03:34:30,557 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76. 
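
The close sequence that finishes here began with a final flush: each family's new HFile was first written under the region's .tmp directory and only afterwards committed into the family directory (the "Committing .tmp/A/... as .../A/..." entries at 03:34:30 above). The sketch below shows that write-then-rename idea with plain java.nio.file calls on a local filesystem; flushFamily is a hypothetical helper for illustration, not the HBase store/flush API.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    /**
     * Illustrative only: mimics the "flush to .tmp, then commit by rename" order
     * visible in the log above, using the local filesystem instead of HDFS.
     */
    public class FlushCommitSketch {

        // Hypothetical helper; the real flow lives in HBase's store/flush classes.
        static Path flushFamily(Path regionDir, String family, byte[] data, String fileName) throws IOException {
            Path tmpFile = regionDir.resolve(".tmp").resolve(family).resolve(fileName);
            Files.createDirectories(tmpFile.getParent());
            Files.write(tmpFile, data);                          // step 1: write the new file under .tmp

            Path committed = regionDir.resolve(family).resolve(fileName);
            Files.createDirectories(committed.getParent());
            // step 2: commit by moving it into the family directory; readers only ever
            // see fully written files, never a half-flushed one.
            return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path regionDir = Files.createTempDirectory("region-d58f0e0e");
            Path hfile = flushFamily(regionDir, "A", "kv-data".getBytes(), "bcfa8a614a614ed9800f17b00f62e327");
            System.out.println("committed: " + hfile);
        }
    }
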
2024-12-10T03:34:30,557 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for d58f0e0e176fadae27aa99803befaf76:
2024-12-10T03:34:30,558 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed d58f0e0e176fadae27aa99803befaf76
2024-12-10T03:34:30,558 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=d58f0e0e176fadae27aa99803befaf76, regionState=CLOSED
2024-12-10T03:34:30,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61
2024-12-10T03:34:30,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure d58f0e0e176fadae27aa99803befaf76, server=50b9ef1c5472,37553,1733801610862 in 1.8830 sec
2024-12-10T03:34:30,562 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60
2024-12-10T03:34:30,562 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d58f0e0e176fadae27aa99803befaf76, UNASSIGN in 1.8890 sec
2024-12-10T03:34:30,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59
2024-12-10T03:34:30,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8930 sec
2024-12-10T03:34:30,564 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801670564"}]},"ts":"1733801670564"}
2024-12-10T03:34:30,565 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta
2024-12-10T03:34:30,601 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED
2024-12-10T03:34:30,603 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9760 sec
2024-12-10T03:34:30,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-12-10T03:34:30,737 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed
2024-12-10T03:34:30,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees
2024-12-10T03:34:30,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees
2024-12-10T03:34:30,742 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees
2024-12-10T03:34:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-10T03:34:30,744 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for
pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:30,747 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,752 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/recovered.edits] 2024-12-10T03:34:30,759 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/21e52ba2f3c54f9cbb4dd21c30ec3d20 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/21e52ba2f3c54f9cbb4dd21c30ec3d20 2024-12-10T03:34:30,759 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/615fd693e1584a56873bf18a987becc1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/615fd693e1584a56873bf18a987becc1 2024-12-10T03:34:30,759 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bcfa8a614a614ed9800f17b00f62e327 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/A/bcfa8a614a614ed9800f17b00f62e327 2024-12-10T03:34:30,764 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/bfac88915a4d494c86eef8a1e480f54b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/bfac88915a4d494c86eef8a1e480f54b 2024-12-10T03:34:30,764 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fe184d7d3c7146c18a4dba1375bb0651 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/fe184d7d3c7146c18a4dba1375bb0651 2024-12-10T03:34:30,764 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3e86a415b6f04a709046c677695c49de to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/B/3e86a415b6f04a709046c677695c49de 2024-12-10T03:34:30,768 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/0d3340584a324f758c6253e82e094668 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/0d3340584a324f758c6253e82e094668 2024-12-10T03:34:30,768 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/a7592ebcdefb429a97b81435eb3f93be to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/a7592ebcdefb429a97b81435eb3f93be 2024-12-10T03:34:30,768 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/2409f14bb0dd41fa836fb1aa5130ae28 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/C/2409f14bb0dd41fa836fb1aa5130ae28 2024-12-10T03:34:30,771 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/recovered.edits/351.seqid to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76/recovered.edits/351.seqid 2024-12-10T03:34:30,771 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,771 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T03:34:30,772 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T03:34:30,772 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T03:34:30,780 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121010129af3f5614b4e94352237305e3f7f_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121010129af3f5614b4e94352237305e3f7f_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,780 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210112a167d10e54ad1ba087ed849478b32_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210112a167d10e54ad1ba087ed849478b32_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,780 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121042ada89f86e746ab96577671ff2db20d_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121042ada89f86e746ab96577671ff2db20d_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,780 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210521e6dd8369b42e0b061da5d232a8b38_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210521e6dd8369b42e0b061da5d232a8b38_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,781 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104c7d47a6604245ecae59d80df27bb75e_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104c7d47a6604245ecae59d80df27bb75e_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,781 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104c6ae5d886d54998925a6939288268c4_d58f0e0e176fadae27aa99803befaf76 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104c6ae5d886d54998925a6939288268c4_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,781 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121063f104ff77ae4e6aac64acf91f0ce647_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121063f104ff77ae4e6aac64acf91f0ce647_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,781 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106bfc0a825bec4d09abf6c0d85eab451f_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106bfc0a825bec4d09abf6c0d85eab451f_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106f7dbfa486a7490fa46d46df909e8bd2_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106f7dbfa486a7490fa46d46df909e8bd2_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106f92ade53fee45cc85161fc0d67b335c_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412106f92ade53fee45cc85161fc0d67b335c_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121071a58d39f57e4c66a769a377b8d8b83d_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121071a58d39f57e4c66a769a377b8d8b83d_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210774bed0ae2be40ba9602047c07af10e7_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210774bed0ae2be40ba9602047c07af10e7_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210759b2e6ed4894127ac1aa04f584e1819_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210759b2e6ed4894127ac1aa04f584e1819_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d3f15c53ecc54a43b4cec7ad0ad7043c_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d3f15c53ecc54a43b4cec7ad0ad7043c_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210886eb000351f4398b60adf7ce83cee4a_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210886eb000351f4398b60adf7ce83cee4a_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,782 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210eab249fb460c445095ffa7618cfeea20_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210eab249fb460c445095ffa7618cfeea20_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,783 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ec7cf643d3a3486c97540129e2fe7bff_d58f0e0e176fadae27aa99803befaf76 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ec7cf643d3a3486c97540129e2fe7bff_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,783 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f210a69c818f496c88690f5e9fab819e_d58f0e0e176fadae27aa99803befaf76 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f210a69c818f496c88690f5e9fab819e_d58f0e0e176fadae27aa99803befaf76 2024-12-10T03:34:30,784 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T03:34:30,785 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:30,787 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T03:34:30,789 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T03:34:30,790 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:30,790 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T03:34:30,790 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733801670790"}]},"ts":"9223372036854775807"} 2024-12-10T03:34:30,792 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T03:34:30,792 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d58f0e0e176fadae27aa99803befaf76, NAME => 'TestAcidGuarantees,,1733801642589.d58f0e0e176fadae27aa99803befaf76.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T03:34:30,792 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T03:34:30,792 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733801670792"}]},"ts":"9223372036854775807"} 2024-12-10T03:34:30,793 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T03:34:30,801 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:30,802 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 62 msec 2024-12-10T03:34:30,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T03:34:30,846 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-10T03:34:30,860 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=248 (was 247) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-627809423_22 at /127.0.0.1:33162 [Waiting for operation #66] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2dd385a3-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2dd385a3-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-627809423_22 at /127.0.0.1:33406 [Waiting for operation #665] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x2dd385a3-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
hconnection-0x2dd385a3-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1054130048_22 at /127.0.0.1:33160 [Waiting for operation #67] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1054130048_22 at /127.0.0.1:36768 [Waiting for operation #251] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=466 (was 458) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=279 (was 211) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3350 (was 3480) 2024-12-10T03:34:30,872 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=248, OpenFileDescriptor=466, MaxFileDescriptor=1048576, SystemLoadAverage=279, ProcessCount=11, AvailableMemoryMB=3349 2024-12-10T03:34:30,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T03:34:30,874 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:34:30,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:30,877 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T03:34:30,877 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:30,878 
INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T03:34:30,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-12-10T03:34:30,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-10T03:34:30,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742074_1250 (size=963) 2024-12-10T03:34:30,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-10T03:34:31,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-10T03:34:31,289 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:34:31,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742075_1251 (size=53) 2024-12-10T03:34:31,320 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:31,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 782122bcf84a1b8761da14e17014109c, disabling compactions & flushes 2024-12-10T03:34:31,321 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:31,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:31,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. after waiting 0 ms 2024-12-10T03:34:31,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:31,321 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:31,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:31,322 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T03:34:31,323 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733801671322"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733801671322"}]},"ts":"1733801671322"} 2024-12-10T03:34:31,324 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T03:34:31,325 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T03:34:31,326 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801671325"}]},"ts":"1733801671325"} 2024-12-10T03:34:31,327 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T03:34:31,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=782122bcf84a1b8761da14e17014109c, ASSIGN}] 2024-12-10T03:34:31,353 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=782122bcf84a1b8761da14e17014109c, ASSIGN 2024-12-10T03:34:31,354 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=782122bcf84a1b8761da14e17014109c, ASSIGN; state=OFFLINE, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=false 2024-12-10T03:34:31,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-10T03:34:31,505 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta 
row=782122bcf84a1b8761da14e17014109c, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:31,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:31,661 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:31,668 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:31,669 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:34:31,670 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,670 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:31,670 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,670 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,672 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,674 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:31,674 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 782122bcf84a1b8761da14e17014109c columnFamilyName A 2024-12-10T03:34:31,674 DEBUG [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:31,675 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.HStore(327): Store=782122bcf84a1b8761da14e17014109c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:31,675 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,676 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:31,677 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 782122bcf84a1b8761da14e17014109c columnFamilyName B 2024-12-10T03:34:31,677 DEBUG [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:31,677 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.HStore(327): Store=782122bcf84a1b8761da14e17014109c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:31,677 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,679 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:31,679 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 782122bcf84a1b8761da14e17014109c columnFamilyName C 2024-12-10T03:34:31,679 DEBUG [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:31,680 INFO [StoreOpener-782122bcf84a1b8761da14e17014109c-1 {}] regionserver.HStore(327): Store=782122bcf84a1b8761da14e17014109c/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:31,680 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:31,681 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,681 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,683 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-10T03:34:31,685 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:31,689 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:34:31,689 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 782122bcf84a1b8761da14e17014109c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66524630, jitterRate=-0.008705765008926392}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:34:31,690 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:31,691 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., pid=66, masterSystemTime=1733801671661 2024-12-10T03:34:31,692 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:31,693 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:31,693 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=782122bcf84a1b8761da14e17014109c, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:31,695 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-10T03:34:31,695 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 in 187 msec 2024-12-10T03:34:31,697 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-10T03:34:31,697 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=782122bcf84a1b8761da14e17014109c, ASSIGN in 344 msec 2024-12-10T03:34:31,697 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T03:34:31,697 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801671697"}]},"ts":"1733801671697"} 2024-12-10T03:34:31,698 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T03:34:31,769 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T03:34:31,771 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 894 msec 2024-12-10T03:34:31,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-10T03:34:31,987 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-12-10T03:34:31,991 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-12-10T03:34:32,002 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,005 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,007 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36692, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,009 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T03:34:32,011 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59234, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T03:34:32,015 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-12-10T03:34:32,026 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,028 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-12-10T03:34:32,035 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,036 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-12-10T03:34:32,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,045 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fef31f8 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14ed1e44 2024-12-10T03:34:32,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b04266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,054 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-12-10T03:34:32,060 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,062 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-12-10T03:34:32,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,070 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-12-10T03:34:32,077 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,079 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-12-10T03:34:32,085 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,087 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3677bd4f to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3bf0ba59 2024-12-10T03:34:32,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9e2976, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,095 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x521aad6f to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c86f707 2024-12-10T03:34:32,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56e9a678, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:32,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:32,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-10T03:34:32,108 DEBUG [hconnection-0x310497b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,108 DEBUG [hconnection-0x6779efe2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,108 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:32,108 DEBUG [hconnection-0x3e34ea1c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,109 DEBUG [hconnection-0x4e239f4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,109 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:32,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T03:34:32,109 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36704, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,109 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36702, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,109 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:32,110 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,110 DEBUG [hconnection-0x6b15d3f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,110 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,110 DEBUG [hconnection-0x4f350d04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,111 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,111 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,112 DEBUG [hconnection-0x67cb09ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,113 DEBUG [hconnection-0x18432d96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,114 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,114 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,114 DEBUG [hconnection-0x84b5fec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-10T03:34:32,115 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,116 DEBUG [hconnection-0x4fade4a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:32,117 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:32,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:32,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:34:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:32,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:32,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/1b38bc85273743b7828122d04940edc5 is 50, key is test_row_0/A:col10/1733801672116/Put/seqid=0 2024-12-10T03:34:32,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742076_1252 (size=12001) 2024-12-10T03:34:32,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801732157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801732158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801732158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801732160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801732160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T03:34:32,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:32,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:32,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801732261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801732261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801732261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:32,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801732262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801732264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T03:34:32,414 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:32,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:32,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:32,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801732463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801732463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801732464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801732466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801732467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/1b38bc85273743b7828122d04940edc5 2024-12-10T03:34:32,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/29605a8053614a7d880c7b8bbd203e64 is 50, key is test_row_0/B:col10/1733801672116/Put/seqid=0 2024-12-10T03:34:32,566 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:32,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:32,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742077_1253 (size=12001) 2024-12-10T03:34:32,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T03:34:32,719 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:32,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:32,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:32,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801732765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801732766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801732767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801732769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:32,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801732770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,871 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:32,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:32,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:32,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:32,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/29605a8053614a7d880c7b8bbd203e64 2024-12-10T03:34:33,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f0b0c18925ef4a4590f547a054a7f40b is 50, key is test_row_0/C:col10/1733801672116/Put/seqid=0 2024-12-10T03:34:33,023 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:33,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:33,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:33,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:33,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742078_1254 (size=12001) 2024-12-10T03:34:33,176 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:33,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:33,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:33,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:33,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T03:34:33,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:33,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801733267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:33,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801733268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:33,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801733270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:33,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801733274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:33,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801733275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,328 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:33,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:33,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:33,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:33,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:33,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f0b0c18925ef4a4590f547a054a7f40b 2024-12-10T03:34:33,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/1b38bc85273743b7828122d04940edc5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1b38bc85273743b7828122d04940edc5 2024-12-10T03:34:33,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1b38bc85273743b7828122d04940edc5, entries=150, sequenceid=12, filesize=11.7 K 2024-12-10T03:34:33,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/29605a8053614a7d880c7b8bbd203e64 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/29605a8053614a7d880c7b8bbd203e64 2024-12-10T03:34:33,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/29605a8053614a7d880c7b8bbd203e64, entries=150, sequenceid=12, 
filesize=11.7 K 2024-12-10T03:34:33,441 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f0b0c18925ef4a4590f547a054a7f40b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f0b0c18925ef4a4590f547a054a7f40b 2024-12-10T03:34:33,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f0b0c18925ef4a4590f547a054a7f40b, entries=150, sequenceid=12, filesize=11.7 K 2024-12-10T03:34:33,446 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 782122bcf84a1b8761da14e17014109c in 1328ms, sequenceid=12, compaction requested=false 2024-12-10T03:34:33,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:33,480 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:33,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T03:34:33,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:33,481 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T03:34:33,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:33,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:33,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:33,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/db57c1cd6c1f4af3af45040bfbe8bee6 is 50, key is test_row_0/A:col10/1733801672156/Put/seqid=0 2024-12-10T03:34:33,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742079_1255 (size=12001) 2024-12-10T03:34:33,917 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/db57c1cd6c1f4af3af45040bfbe8bee6 2024-12-10T03:34:33,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/49b6fc6a00f747c9a56adfd0b4429eff is 50, key is test_row_0/B:col10/1733801672156/Put/seqid=0 2024-12-10T03:34:33,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742080_1256 (size=12001) 2024-12-10T03:34:34,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T03:34:34,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
as already flushing 2024-12-10T03:34:34,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:34,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801734279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801734279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801734279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801734280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801734283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,329 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/49b6fc6a00f747c9a56adfd0b4429eff 2024-12-10T03:34:34,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/09ae9e4aa6b04262b88ca2c0c83cdfa3 is 50, key is test_row_0/C:col10/1733801672156/Put/seqid=0 2024-12-10T03:34:34,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742081_1257 (size=12001) 2024-12-10T03:34:34,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801734381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801734382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801734382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801734382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801734584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801734584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801734584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801734585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,745 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/09ae9e4aa6b04262b88ca2c0c83cdfa3 2024-12-10T03:34:34,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/db57c1cd6c1f4af3af45040bfbe8bee6 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/db57c1cd6c1f4af3af45040bfbe8bee6 2024-12-10T03:34:34,753 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/db57c1cd6c1f4af3af45040bfbe8bee6, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T03:34:34,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/49b6fc6a00f747c9a56adfd0b4429eff as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/49b6fc6a00f747c9a56adfd0b4429eff 2024-12-10T03:34:34,758 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/49b6fc6a00f747c9a56adfd0b4429eff, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T03:34:34,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/09ae9e4aa6b04262b88ca2c0c83cdfa3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/09ae9e4aa6b04262b88ca2c0c83cdfa3 2024-12-10T03:34:34,763 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/09ae9e4aa6b04262b88ca2c0c83cdfa3, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T03:34:34,764 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 782122bcf84a1b8761da14e17014109c in 1282ms, sequenceid=39, compaction requested=false 2024-12-10T03:34:34,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:34,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:34,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-10T03:34:34,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-10T03:34:34,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-10T03:34:34,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6560 sec 2024-12-10T03:34:34,767 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 2.6600 sec 2024-12-10T03:34:34,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:34,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:34:34,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:34,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:34,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:34,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:34,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:34,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:34,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/9e98016549944a14b6e67953a8bbf2a4 is 50, key is test_row_1/A:col10/1733801674888/Put/seqid=0 2024-12-10T03:34:34,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742082_1258 (size=11997) 2024-12-10T03:34:34,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801734905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801734906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801734906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:34,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:34,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801734907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801735008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801735009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801735010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801735010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,065 DEBUG [master/50b9ef1c5472:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c325cc16a4923122e0562a36b998907c changed from -1.0 to 0.0, refreshing cache 2024-12-10T03:34:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801735210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801735212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801735212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801735212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/9e98016549944a14b6e67953a8bbf2a4 2024-12-10T03:34:35,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/cd00d83b691e474fabe13c68a8a4bfe7 is 50, key is test_row_1/B:col10/1733801674888/Put/seqid=0 2024-12-10T03:34:35,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742083_1259 (size=9657) 2024-12-10T03:34:35,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801735513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801735513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801735514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:35,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801735515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:35,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/cd00d83b691e474fabe13c68a8a4bfe7 2024-12-10T03:34:35,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/79f9778078174465ab3a755ef5ec095f is 50, key is test_row_1/C:col10/1733801674888/Put/seqid=0 2024-12-10T03:34:35,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742084_1260 (size=9657) 2024-12-10T03:34:36,015 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801736015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801736017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801736018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801736018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,079 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T03:34:36,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/79f9778078174465ab3a755ef5ec095f 2024-12-10T03:34:36,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/9e98016549944a14b6e67953a8bbf2a4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9e98016549944a14b6e67953a8bbf2a4 2024-12-10T03:34:36,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9e98016549944a14b6e67953a8bbf2a4, entries=150, sequenceid=51, filesize=11.7 K 2024-12-10T03:34:36,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/cd00d83b691e474fabe13c68a8a4bfe7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/cd00d83b691e474fabe13c68a8a4bfe7 2024-12-10T03:34:36,138 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/cd00d83b691e474fabe13c68a8a4bfe7, entries=100, sequenceid=51, filesize=9.4 K 2024-12-10T03:34:36,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/79f9778078174465ab3a755ef5ec095f as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/79f9778078174465ab3a755ef5ec095f 2024-12-10T03:34:36,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/79f9778078174465ab3a755ef5ec095f, entries=100, sequenceid=51, filesize=9.4 K 2024-12-10T03:34:36,144 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 782122bcf84a1b8761da14e17014109c in 1256ms, sequenceid=51, compaction requested=true 2024-12-10T03:34:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:36,144 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:36,144 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:36,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:36,145 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:36,145 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35999 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:36,145 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/A is initiating minor compaction (all files) 2024-12-10T03:34:36,145 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/B is initiating minor compaction (all files) 2024-12-10T03:34:36,145 INFO 
[RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/A in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,145 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/B in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,146 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/29605a8053614a7d880c7b8bbd203e64, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/49b6fc6a00f747c9a56adfd0b4429eff, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/cd00d83b691e474fabe13c68a8a4bfe7] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=32.9 K 2024-12-10T03:34:36,146 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1b38bc85273743b7828122d04940edc5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/db57c1cd6c1f4af3af45040bfbe8bee6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9e98016549944a14b6e67953a8bbf2a4] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=35.2 K 2024-12-10T03:34:36,146 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b38bc85273743b7828122d04940edc5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733801672114 2024-12-10T03:34:36,146 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 29605a8053614a7d880c7b8bbd203e64, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733801672114 2024-12-10T03:34:36,146 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting db57c1cd6c1f4af3af45040bfbe8bee6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733801672156 2024-12-10T03:34:36,146 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 49b6fc6a00f747c9a56adfd0b4429eff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733801672156 2024-12-10T03:34:36,146 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e98016549944a14b6e67953a8bbf2a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733801674278 2024-12-10T03:34:36,147 DEBUG 
[RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting cd00d83b691e474fabe13c68a8a4bfe7, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733801674887 2024-12-10T03:34:36,153 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#B#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:36,153 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/7351a3133450422590b906367b928a25 is 50, key is test_row_0/B:col10/1733801672156/Put/seqid=0 2024-12-10T03:34:36,155 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#A#compaction#217 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:36,156 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/c3a8cb37870c4ae99cae31310496cfe8 is 50, key is test_row_0/A:col10/1733801672156/Put/seqid=0 2024-12-10T03:34:36,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742086_1262 (size=12104) 2024-12-10T03:34:36,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742085_1261 (size=12104) 2024-12-10T03:34:36,170 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/7351a3133450422590b906367b928a25 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7351a3133450422590b906367b928a25 2024-12-10T03:34:36,175 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 782122bcf84a1b8761da14e17014109c/B of 782122bcf84a1b8761da14e17014109c into 7351a3133450422590b906367b928a25(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:36,176 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:36,176 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/B, priority=13, startTime=1733801676144; duration=0sec 2024-12-10T03:34:36,176 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:36,176 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:B 2024-12-10T03:34:36,176 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:36,177 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:36,177 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/C is initiating minor compaction (all files) 2024-12-10T03:34:36,178 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/C in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,178 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f0b0c18925ef4a4590f547a054a7f40b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/09ae9e4aa6b04262b88ca2c0c83cdfa3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/79f9778078174465ab3a755ef5ec095f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=32.9 K 2024-12-10T03:34:36,178 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f0b0c18925ef4a4590f547a054a7f40b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733801672114 2024-12-10T03:34:36,179 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 09ae9e4aa6b04262b88ca2c0c83cdfa3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733801672156 2024-12-10T03:34:36,179 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 79f9778078174465ab3a755ef5ec095f, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733801674887 2024-12-10T03:34:36,197 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
782122bcf84a1b8761da14e17014109c#C#compaction#218 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:36,197 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/eadaba43668643ddbe0e5281e5fd047c is 50, key is test_row_0/C:col10/1733801672156/Put/seqid=0 2024-12-10T03:34:36,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742087_1263 (size=12104) 2024-12-10T03:34:36,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T03:34:36,213 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-10T03:34:36,214 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:36,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-10T03:34:36,216 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:36,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T03:34:36,216 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:36,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:36,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:36,294 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T03:34:36,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:36,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:36,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:36,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:36,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:36,295 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:36,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801736309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T03:34:36,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/8745a3902fea4f048e98d44f9d864926 is 50, key is test_row_0/A:col10/1733801676293/Put/seqid=0 2024-12-10T03:34:36,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742088_1264 (size=14341) 2024-12-10T03:34:36,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:36,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:36,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:36,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:36,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801736412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T03:34:36,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:36,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:36,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:36,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,563 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/c3a8cb37870c4ae99cae31310496cfe8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/c3a8cb37870c4ae99cae31310496cfe8 2024-12-10T03:34:36,567 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 782122bcf84a1b8761da14e17014109c/A of 782122bcf84a1b8761da14e17014109c into c3a8cb37870c4ae99cae31310496cfe8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:36,567 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:36,567 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/A, priority=13, startTime=1733801676144; duration=0sec 2024-12-10T03:34:36,567 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:36,567 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:A 2024-12-10T03:34:36,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801736613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,618 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/eadaba43668643ddbe0e5281e5fd047c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/eadaba43668643ddbe0e5281e5fd047c 2024-12-10T03:34:36,623 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 782122bcf84a1b8761da14e17014109c/C of 782122bcf84a1b8761da14e17014109c into eadaba43668643ddbe0e5281e5fd047c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:36,623 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:36,623 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/C, priority=13, startTime=1733801676144; duration=0sec 2024-12-10T03:34:36,623 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:36,623 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:C 2024-12-10T03:34:36,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:36,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:36,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:36,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:36,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:36,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/8745a3902fea4f048e98d44f9d864926 2024-12-10T03:34:36,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/f1a0a1252d08406baa338e47ad1ddba0 is 50, key is test_row_0/B:col10/1733801676293/Put/seqid=0 2024-12-10T03:34:36,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742089_1265 (size=12001) 2024-12-10T03:34:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T03:34:36,824 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:36,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:36,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:36,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:36,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801736918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,977 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:36,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:36,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:36,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:36,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:36,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:37,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801737019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:37,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801737022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:37,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801737023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:37,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801737028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,129 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:37,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:37,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:37,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:37,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:37,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/f1a0a1252d08406baa338e47ad1ddba0 2024-12-10T03:34:37,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/19166a677a304cc7b7f7f89214630e3e is 50, key is test_row_0/C:col10/1733801676293/Put/seqid=0 2024-12-10T03:34:37,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742090_1266 (size=12001) 2024-12-10T03:34:37,281 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:37,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:37,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:37,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:37,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:37,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T03:34:37,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:37,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801737420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,434 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:37,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:37,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:37,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:37,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:37,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/19166a677a304cc7b7f7f89214630e3e 2024-12-10T03:34:37,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/8745a3902fea4f048e98d44f9d864926 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/8745a3902fea4f048e98d44f9d864926 2024-12-10T03:34:37,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/8745a3902fea4f048e98d44f9d864926, entries=200, sequenceid=78, filesize=14.0 K 2024-12-10T03:34:37,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/f1a0a1252d08406baa338e47ad1ddba0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/f1a0a1252d08406baa338e47ad1ddba0 2024-12-10T03:34:37,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/f1a0a1252d08406baa338e47ad1ddba0, entries=150, sequenceid=78, 
filesize=11.7 K 2024-12-10T03:34:37,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/19166a677a304cc7b7f7f89214630e3e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/19166a677a304cc7b7f7f89214630e3e 2024-12-10T03:34:37,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/19166a677a304cc7b7f7f89214630e3e, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T03:34:37,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 782122bcf84a1b8761da14e17014109c in 1275ms, sequenceid=78, compaction requested=false 2024-12-10T03:34:37,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:37,586 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:37,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T03:34:37,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:37,587 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T03:34:37,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:37,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:37,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:37,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:37,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:37,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:37,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/0832cc39a509449dbbd0647bd81558d5 is 50, key is test_row_0/A:col10/1733801676308/Put/seqid=0 2024-12-10T03:34:37,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742091_1267 (size=12001) 2024-12-10T03:34:37,995 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/0832cc39a509449dbbd0647bd81558d5 2024-12-10T03:34:38,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/4c2b214a0a1f44a6864a813121ba23d8 is 50, key is test_row_0/B:col10/1733801676308/Put/seqid=0 2024-12-10T03:34:38,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742092_1268 (size=12001) 2024-12-10T03:34:38,008 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=90 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/4c2b214a0a1f44a6864a813121ba23d8 2024-12-10T03:34:38,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/0d36721194af439f9bfadb425d899270 is 50, key is test_row_0/C:col10/1733801676308/Put/seqid=0 2024-12-10T03:34:38,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742093_1269 (size=12001) 2024-12-10T03:34:38,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T03:34:38,425 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/0d36721194af439f9bfadb425d899270 2024-12-10T03:34:38,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:38,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:38,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/0832cc39a509449dbbd0647bd81558d5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0832cc39a509449dbbd0647bd81558d5 2024-12-10T03:34:38,432 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0832cc39a509449dbbd0647bd81558d5, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T03:34:38,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/4c2b214a0a1f44a6864a813121ba23d8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/4c2b214a0a1f44a6864a813121ba23d8 2024-12-10T03:34:38,442 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/4c2b214a0a1f44a6864a813121ba23d8, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T03:34:38,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/0d36721194af439f9bfadb425d899270 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0d36721194af439f9bfadb425d899270 2024-12-10T03:34:38,447 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0d36721194af439f9bfadb425d899270, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T03:34:38,448 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=46.96 KB/48090 for 782122bcf84a1b8761da14e17014109c in 861ms, sequenceid=90, compaction requested=true 2024-12-10T03:34:38,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:38,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:38,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-10T03:34:38,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-10T03:34:38,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:38,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:34:38,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:38,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:38,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:38,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:38,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:38,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:38,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-10T03:34:38,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2330 sec 2024-12-10T03:34:38,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 2.2370 sec 2024-12-10T03:34:38,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/737e9fc519b1442284e3e2ae14c3c978 is 50, key is test_row_0/A:col10/1733801678449/Put/seqid=0 2024-12-10T03:34:38,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742094_1270 (size=12001) 2024-12-10T03:34:38,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:38,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801738506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:38,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:38,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801738609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:38,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:38,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801738813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:38,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/737e9fc519b1442284e3e2ae14c3c978 2024-12-10T03:34:38,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/b27a6f80cca84580bb8df4e0b092a743 is 50, key is test_row_0/B:col10/1733801678449/Put/seqid=0 2024-12-10T03:34:38,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742095_1271 (size=12001) 2024-12-10T03:34:39,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:39,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801739023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:39,025 DEBUG [Thread-1162 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4120 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:39,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:39,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801739033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:39,034 DEBUG [Thread-1160 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:39,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:39,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801739037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:39,039 DEBUG [Thread-1156 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:39,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801739040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:39,042 DEBUG [Thread-1154 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:39,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:39,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801739116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:39,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/b27a6f80cca84580bb8df4e0b092a743 2024-12-10T03:34:39,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/0ca79440029543908a55e5863e074497 is 50, key is test_row_0/C:col10/1733801678449/Put/seqid=0 2024-12-10T03:34:39,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742096_1272 (size=12001) 2024-12-10T03:34:39,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:39,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801739619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:39,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/0ca79440029543908a55e5863e074497 2024-12-10T03:34:39,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/737e9fc519b1442284e3e2ae14c3c978 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/737e9fc519b1442284e3e2ae14c3c978 2024-12-10T03:34:39,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/737e9fc519b1442284e3e2ae14c3c978, entries=150, sequenceid=101, filesize=11.7 K 2024-12-10T03:34:39,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/b27a6f80cca84580bb8df4e0b092a743 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/b27a6f80cca84580bb8df4e0b092a743 2024-12-10T03:34:39,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/b27a6f80cca84580bb8df4e0b092a743, entries=150, sequenceid=101, filesize=11.7 K 2024-12-10T03:34:39,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/0ca79440029543908a55e5863e074497 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0ca79440029543908a55e5863e074497 2024-12-10T03:34:39,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0ca79440029543908a55e5863e074497, entries=150, sequenceid=101, filesize=11.7 K 2024-12-10T03:34:39,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 782122bcf84a1b8761da14e17014109c in 1259ms, sequenceid=101, compaction requested=true 2024-12-10T03:34:39,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:39,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:39,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:39,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:39,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:39,709 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:39,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:39,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:39,709 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:39,710 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:39,710 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/B is initiating minor 
compaction (all files) 2024-12-10T03:34:39,710 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/B in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:39,710 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7351a3133450422590b906367b928a25, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/f1a0a1252d08406baa338e47ad1ddba0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/4c2b214a0a1f44a6864a813121ba23d8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/b27a6f80cca84580bb8df4e0b092a743] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=47.0 K 2024-12-10T03:34:39,710 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:39,710 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/A is initiating minor compaction (all files) 2024-12-10T03:34:39,711 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/A in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:39,711 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/c3a8cb37870c4ae99cae31310496cfe8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/8745a3902fea4f048e98d44f9d864926, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0832cc39a509449dbbd0647bd81558d5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/737e9fc519b1442284e3e2ae14c3c978] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=49.3 K 2024-12-10T03:34:39,711 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7351a3133450422590b906367b928a25, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733801672156 2024-12-10T03:34:39,711 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3a8cb37870c4ae99cae31310496cfe8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733801672156 2024-12-10T03:34:39,711 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f1a0a1252d08406baa338e47ad1ddba0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733801674905 2024-12-10T03:34:39,711 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8745a3902fea4f048e98d44f9d864926, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733801674902 2024-12-10T03:34:39,712 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0832cc39a509449dbbd0647bd81558d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801676297 2024-12-10T03:34:39,712 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c2b214a0a1f44a6864a813121ba23d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801676297 2024-12-10T03:34:39,712 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 737e9fc519b1442284e3e2ae14c3c978, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733801678433 2024-12-10T03:34:39,712 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting b27a6f80cca84580bb8df4e0b092a743, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733801678433 2024-12-10T03:34:39,720 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#B#compaction#228 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:39,720 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#A#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:39,720 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/93d49526de2c4dd38b0589c1ebd518c8 is 50, key is test_row_0/B:col10/1733801678449/Put/seqid=0 2024-12-10T03:34:39,720 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/1bfd4e8ce2904a84a23916a2d29f6f36 is 50, key is test_row_0/A:col10/1733801678449/Put/seqid=0 2024-12-10T03:34:39,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742097_1273 (size=12241) 2024-12-10T03:34:39,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742098_1274 (size=12241) 2024-12-10T03:34:39,737 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/93d49526de2c4dd38b0589c1ebd518c8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/93d49526de2c4dd38b0589c1ebd518c8 2024-12-10T03:34:39,741 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 782122bcf84a1b8761da14e17014109c/B of 782122bcf84a1b8761da14e17014109c into 93d49526de2c4dd38b0589c1ebd518c8(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
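The PressureAwareThroughputController lines above report each compaction's average throughput against a 50.00 MB/second total limit, sleeping the writer when it gets ahead of that limit (here it never had to: "slept 0 time(s)"). A minimal sketch of that kind of cumulative rate limiting is below; the class and numbers are placeholders, not the HBase controller itself:

// Minimal sketch of a cumulative throughput limiter in the spirit of the
// throttling messages above (hypothetical class, not PressureAwareThroughputController).
final class ThroughputLimiterSketch {
    private final double limitBytesPerSec;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    ThroughputLimiterSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Record a chunk of compaction output and sleep just long enough to keep
    // the cumulative rate at or below the configured limit.
    void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minElapsedSec = bytesWritten / limitBytesPerSec;
        if (minElapsedSec > elapsedSec) {
            Thread.sleep((long) ((minElapsedSec - elapsedSec) * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
            limiter.control(12 * 1024); // ~12 KB per write, roughly the HFile block sizes above
        }
    }
}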
2024-12-10T03:34:39,741 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:39,741 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/B, priority=12, startTime=1733801679709; duration=0sec 2024-12-10T03:34:39,741 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:39,741 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:B 2024-12-10T03:34:39,741 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:39,742 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:39,742 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/C is initiating minor compaction (all files) 2024-12-10T03:34:39,742 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/C in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:39,743 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/eadaba43668643ddbe0e5281e5fd047c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/19166a677a304cc7b7f7f89214630e3e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0d36721194af439f9bfadb425d899270, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0ca79440029543908a55e5863e074497] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=47.0 K 2024-12-10T03:34:39,743 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting eadaba43668643ddbe0e5281e5fd047c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733801672156 2024-12-10T03:34:39,743 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 19166a677a304cc7b7f7f89214630e3e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733801674905 2024-12-10T03:34:39,743 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d36721194af439f9bfadb425d899270, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=90, earliestPutTs=1733801676297 2024-12-10T03:34:39,744 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ca79440029543908a55e5863e074497, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733801678433 2024-12-10T03:34:39,750 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#C#compaction#230 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:39,750 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/b53132fe502741169c29e459c523a0f8 is 50, key is test_row_0/C:col10/1733801678449/Put/seqid=0 2024-12-10T03:34:39,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742099_1275 (size=12241) 2024-12-10T03:34:40,129 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/1bfd4e8ce2904a84a23916a2d29f6f36 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1bfd4e8ce2904a84a23916a2d29f6f36 2024-12-10T03:34:40,133 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 782122bcf84a1b8761da14e17014109c/A of 782122bcf84a1b8761da14e17014109c into 1bfd4e8ce2904a84a23916a2d29f6f36(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
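Each flush and compaction above first writes its output under the region's .tmp directory ("Len of the biggest cell in .../.tmp/...") and only afterwards commits it into the store ("Committing .../.tmp/B/... as .../B/..."), so readers never observe a half-written HFile. A generic sketch of that write-to-temp-then-publish pattern, using plain java.nio with placeholder paths rather than the HBase/HDFS code:

// Generic illustration of the .tmp-write-then-commit pattern visible in the
// HRegionFileSystem "Committing ... as ..." entries (placeholder paths, not HBase code).
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class TmpThenCommitSketch {
    static void writeAndCommit(Path tmpDir, Path storeDir, String fileName, byte[] payload) throws Exception {
        Files.createDirectories(tmpDir);
        Files.createDirectories(storeDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, payload);                      // write everything to the temp location first
        Files.move(tmpFile, storeDir.resolve(fileName),     // then publish atomically into the store directory
                   StandardCopyOption.ATOMIC_MOVE);
    }
}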
2024-12-10T03:34:40,133 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:40,133 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/A, priority=12, startTime=1733801679709; duration=0sec 2024-12-10T03:34:40,133 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:40,133 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:A 2024-12-10T03:34:40,158 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/b53132fe502741169c29e459c523a0f8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/b53132fe502741169c29e459c523a0f8 2024-12-10T03:34:40,161 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 782122bcf84a1b8761da14e17014109c/C of 782122bcf84a1b8761da14e17014109c into b53132fe502741169c29e459c523a0f8(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:40,162 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:40,162 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/C, priority=12, startTime=1733801679709; duration=0sec 2024-12-10T03:34:40,162 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:40,162 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:C 2024-12-10T03:34:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T03:34:40,320 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-10T03:34:40,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-10T03:34:40,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=71 2024-12-10T03:34:40,323 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:40,323 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:40,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:40,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T03:34:40,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:40,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T03:34:40,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:40,475 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:34:40,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:40,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:40,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:40,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:40,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:40,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:40,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/b2154635b82944dc9b6a7c71db33244e is 50, key is test_row_0/A:col10/1733801678497/Put/seqid=0 
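The pid=71/72 entries above show the client-initiated FLUSH round trip: the master stores a FlushTableProcedure, spawns a per-region FlushRegionProcedure, and the region server executes it via FlushRegionCallable while the client polls "Checking to see if procedure is done". On the client side this corresponds to a single Admin.flush call; a minimal sketch using the public HBase client API (the table name is taken from the log, configuration left at defaults):

// Minimal client-side sketch of the FLUSH operations recorded above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Submits a FlushTableProcedure on the master and waits for the
            // per-region FlushRegionProcedure subprocedures to complete.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}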
2024-12-10T03:34:40,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742100_1276 (size=12001) 2024-12-10T03:34:40,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T03:34:40,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:40,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:40,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:40,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801740637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:40,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:40,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801740739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:40,888 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/b2154635b82944dc9b6a7c71db33244e 2024-12-10T03:34:40,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/89edc3e2bc074173bae72decdcf9adb8 is 50, key is test_row_0/B:col10/1733801678497/Put/seqid=0 2024-12-10T03:34:40,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742101_1277 (size=12001) 2024-12-10T03:34:40,901 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/89edc3e2bc074173bae72decdcf9adb8 2024-12-10T03:34:40,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/ffb7ab777cd94d71882b089bb7f5b5cf is 50, key is test_row_0/C:col10/1733801678497/Put/seqid=0 2024-12-10T03:34:40,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742102_1278 (size=12001) 2024-12-10T03:34:40,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T03:34:40,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:40,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801740942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:41,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:41,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801741246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:41,312 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/ffb7ab777cd94d71882b089bb7f5b5cf 2024-12-10T03:34:41,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/b2154635b82944dc9b6a7c71db33244e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/b2154635b82944dc9b6a7c71db33244e 2024-12-10T03:34:41,352 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/b2154635b82944dc9b6a7c71db33244e, entries=150, sequenceid=129, filesize=11.7 K 2024-12-10T03:34:41,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/89edc3e2bc074173bae72decdcf9adb8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/89edc3e2bc074173bae72decdcf9adb8 2024-12-10T03:34:41,357 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/89edc3e2bc074173bae72decdcf9adb8, entries=150, sequenceid=129, filesize=11.7 K 2024-12-10T03:34:41,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/ffb7ab777cd94d71882b089bb7f5b5cf as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/ffb7ab777cd94d71882b089bb7f5b5cf 2024-12-10T03:34:41,361 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/ffb7ab777cd94d71882b089bb7f5b5cf, entries=150, sequenceid=129, filesize=11.7 K 2024-12-10T03:34:41,362 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 782122bcf84a1b8761da14e17014109c in 887ms, sequenceid=129, compaction requested=false 2024-12-10T03:34:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:41,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-10T03:34:41,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-10T03:34:41,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-10T03:34:41,363 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0400 sec 2024-12-10T03:34:41,364 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.0430 sec 2024-12-10T03:34:41,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T03:34:41,424 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-10T03:34:41,425 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:41,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-10T03:34:41,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T03:34:41,426 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:41,426 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:41,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:41,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T03:34:41,577 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:41,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-10T03:34:41,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:41,578 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:34:41,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:41,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:41,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:41,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:41,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:41,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:41,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/86cd7fcb0a414bb8a40c2ce553d68173 is 50, key is test_row_0/A:col10/1733801680635/Put/seqid=0 2024-12-10T03:34:41,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742103_1279 (size=12151) 
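The repeated RegionTooBusyException warnings above come from HRegion.checkResources() rejecting mutations while the region's memstore is over its 512 KB blocking limit during the flush. The HBase client normally retries this exception on its own; the sketch below simply makes that retry-with-backoff behaviour explicit (placeholder table, row and backoff values, not a prescribed pattern):

// Explicit retry of a Put that is rejected with RegionTooBusyException while
// the region is over its memstore blocking limit (illustrative only).
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class BusyRegionRetrySketch {
    static void putWithRetry(Connection connection, Put put) throws Exception {
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) throw e;      // give up after a few attempts
                    Thread.sleep(100L * attempt);   // back off while the flush drains the memstore
                }
            }
        }
    }

    static Put examplePut() {
        // Row and column mirror the test's key layout (test_row_0, families A/B/C).
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    }
}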
2024-12-10T03:34:41,586 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/86cd7fcb0a414bb8a40c2ce553d68173 2024-12-10T03:34:41,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/6bd27408c66f44c48a64f53d27afe175 is 50, key is test_row_0/B:col10/1733801680635/Put/seqid=0 2024-12-10T03:34:41,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742104_1280 (size=12151) 2024-12-10T03:34:41,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T03:34:41,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:41,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:41,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:41,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801741800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:41,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:41,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801741902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:41,995 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/6bd27408c66f44c48a64f53d27afe175 2024-12-10T03:34:42,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/10f35e9a499a4d63880e132bdc928440 is 50, key is test_row_0/C:col10/1733801680635/Put/seqid=0 2024-12-10T03:34:42,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742105_1281 (size=12151) 2024-12-10T03:34:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T03:34:42,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801742104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:42,406 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/10f35e9a499a4d63880e132bdc928440 2024-12-10T03:34:42,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:42,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801742409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:42,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/86cd7fcb0a414bb8a40c2ce553d68173 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/86cd7fcb0a414bb8a40c2ce553d68173 2024-12-10T03:34:42,414 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/86cd7fcb0a414bb8a40c2ce553d68173, entries=150, sequenceid=140, filesize=11.9 K 2024-12-10T03:34:42,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/6bd27408c66f44c48a64f53d27afe175 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/6bd27408c66f44c48a64f53d27afe175 2024-12-10T03:34:42,418 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/6bd27408c66f44c48a64f53d27afe175, entries=150, sequenceid=140, filesize=11.9 K 2024-12-10T03:34:42,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/10f35e9a499a4d63880e132bdc928440 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/10f35e9a499a4d63880e132bdc928440 2024-12-10T03:34:42,422 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/10f35e9a499a4d63880e132bdc928440, entries=150, sequenceid=140, filesize=11.9 K 2024-12-10T03:34:42,423 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 782122bcf84a1b8761da14e17014109c in 845ms, sequenceid=140, compaction requested=true 2024-12-10T03:34:42,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:42,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:42,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-10T03:34:42,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-10T03:34:42,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-10T03:34:42,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 997 msec 2024-12-10T03:34:42,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.0000 sec 2024-12-10T03:34:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T03:34:42,529 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-10T03:34:42,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-10T03:34:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T03:34:42,531 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:42,531 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:42,531 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, 
state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:42,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T03:34:42,682 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:42,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-10T03:34:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:42,683 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:34:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:42,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:42,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/12e95abfb0ba4cde837cdc4c5b3efede is 50, key is test_row_0/A:col10/1733801681798/Put/seqid=0 2024-12-10T03:34:42,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742106_1282 (size=12151) 2024-12-10T03:34:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T03:34:42,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:42,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
as already flushing 2024-12-10T03:34:42,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:42,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801742926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:43,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801743029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:43,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36768 deadline: 1733801743032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,033 DEBUG [Thread-1162 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:43,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:43,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36704 deadline: 1733801743050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,052 DEBUG [Thread-1160 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:43,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:43,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36702 deadline: 1733801743057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,059 DEBUG [Thread-1154 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:43,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:43,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36786 deadline: 1733801743074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,076 DEBUG [Thread-1156 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:34:43,113 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/12e95abfb0ba4cde837cdc4c5b3efede 2024-12-10T03:34:43,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/ce554981aed54611949ee2218b77ef6e is 50, key is test_row_0/B:col10/1733801681798/Put/seqid=0 2024-12-10T03:34:43,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742107_1283 (size=12151) 2024-12-10T03:34:43,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T03:34:43,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:43,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801743231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,532 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/ce554981aed54611949ee2218b77ef6e 2024-12-10T03:34:43,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:43,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801743535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:43,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/5dfdf4a05c384efe9267660863a18337 is 50, key is test_row_0/C:col10/1733801681798/Put/seqid=0 2024-12-10T03:34:43,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742108_1284 (size=12151) 2024-12-10T03:34:43,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T03:34:43,957 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/5dfdf4a05c384efe9267660863a18337 2024-12-10T03:34:43,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/12e95abfb0ba4cde837cdc4c5b3efede as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/12e95abfb0ba4cde837cdc4c5b3efede 2024-12-10T03:34:43,964 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/12e95abfb0ba4cde837cdc4c5b3efede, entries=150, sequenceid=165, filesize=11.9 K 2024-12-10T03:34:43,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/ce554981aed54611949ee2218b77ef6e as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ce554981aed54611949ee2218b77ef6e 2024-12-10T03:34:43,969 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ce554981aed54611949ee2218b77ef6e, entries=150, sequenceid=165, filesize=11.9 K 2024-12-10T03:34:43,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/5dfdf4a05c384efe9267660863a18337 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/5dfdf4a05c384efe9267660863a18337 2024-12-10T03:34:43,973 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/5dfdf4a05c384efe9267660863a18337, entries=150, sequenceid=165, filesize=11.9 K 2024-12-10T03:34:43,974 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 782122bcf84a1b8761da14e17014109c in 1291ms, sequenceid=165, compaction requested=true 2024-12-10T03:34:43,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:43,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:43,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-10T03:34:43,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-10T03:34:43,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-10T03:34:43,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4440 sec 2024-12-10T03:34:43,977 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.4460 sec 2024-12-10T03:34:44,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:44,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T03:34:44,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:44,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:44,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:44,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:44,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:44,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:44,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/e1e8d03531ad42daa5546546ec800cda is 50, key is test_row_0/A:col10/1733801682923/Put/seqid=0 2024-12-10T03:34:44,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742109_1285 (size=14541) 2024-12-10T03:34:44,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/e1e8d03531ad42daa5546546ec800cda 2024-12-10T03:34:44,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/7829ef488b1349849ac0704e423baf36 is 50, key is test_row_0/B:col10/1733801682923/Put/seqid=0 2024-12-10T03:34:44,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742110_1286 
(size=12151) 2024-12-10T03:34:44,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/7829ef488b1349849ac0704e423baf36 2024-12-10T03:34:44,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f19a334148544697945365832ff6c27f is 50, key is test_row_0/C:col10/1733801682923/Put/seqid=0 2024-12-10T03:34:44,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742111_1287 (size=12151) 2024-12-10T03:34:44,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801744104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:44,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:44,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801744207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:44,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:44,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801744409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:44,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f19a334148544697945365832ff6c27f 2024-12-10T03:34:44,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/e1e8d03531ad42daa5546546ec800cda as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/e1e8d03531ad42daa5546546ec800cda 2024-12-10T03:34:44,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/e1e8d03531ad42daa5546546ec800cda, entries=200, sequenceid=177, filesize=14.2 K 2024-12-10T03:34:44,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/7829ef488b1349849ac0704e423baf36 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7829ef488b1349849ac0704e423baf36 2024-12-10T03:34:44,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7829ef488b1349849ac0704e423baf36, entries=150, sequenceid=177, filesize=11.9 K 2024-12-10T03:34:44,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f19a334148544697945365832ff6c27f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f19a334148544697945365832ff6c27f 2024-12-10T03:34:44,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f19a334148544697945365832ff6c27f, entries=150, sequenceid=177, filesize=11.9 K 2024-12-10T03:34:44,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 782122bcf84a1b8761da14e17014109c in 447ms, sequenceid=177, compaction requested=true 2024-12-10T03:34:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:44,487 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:34:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:44,487 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:34:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:44,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:44,488 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60695 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:34:44,488 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 63085 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:34:44,488 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/A is initiating minor compaction (all files) 2024-12-10T03:34:44,488 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/B is initiating minor compaction (all files) 2024-12-10T03:34:44,488 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/B in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:44,488 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/A in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:44,489 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1bfd4e8ce2904a84a23916a2d29f6f36, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/b2154635b82944dc9b6a7c71db33244e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/86cd7fcb0a414bb8a40c2ce553d68173, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/12e95abfb0ba4cde837cdc4c5b3efede, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/e1e8d03531ad42daa5546546ec800cda] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=61.6 K 2024-12-10T03:34:44,489 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/93d49526de2c4dd38b0589c1ebd518c8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/89edc3e2bc074173bae72decdcf9adb8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/6bd27408c66f44c48a64f53d27afe175, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ce554981aed54611949ee2218b77ef6e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7829ef488b1349849ac0704e423baf36] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=59.3 K 2024-12-10T03:34:44,489 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bfd4e8ce2904a84a23916a2d29f6f36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733801678433 2024-12-10T03:34:44,489 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 93d49526de2c4dd38b0589c1ebd518c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733801678433 2024-12-10T03:34:44,489 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 89edc3e2bc074173bae72decdcf9adb8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733801678497 2024-12-10T03:34:44,489 DEBUG 
[RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2154635b82944dc9b6a7c71db33244e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733801678497 2024-12-10T03:34:44,489 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bd27408c66f44c48a64f53d27afe175, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733801680632 2024-12-10T03:34:44,489 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 86cd7fcb0a414bb8a40c2ce553d68173, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733801680632 2024-12-10T03:34:44,490 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ce554981aed54611949ee2218b77ef6e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733801681796 2024-12-10T03:34:44,490 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12e95abfb0ba4cde837cdc4c5b3efede, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733801681796 2024-12-10T03:34:44,490 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7829ef488b1349849ac0704e423baf36, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733801682923 2024-12-10T03:34:44,490 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1e8d03531ad42daa5546546ec800cda, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733801682919 2024-12-10T03:34:44,499 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#B#compaction#243 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:44,500 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/ab84a2d48275426b9c9511b350b29072 is 50, key is test_row_0/B:col10/1733801682923/Put/seqid=0 2024-12-10T03:34:44,514 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#A#compaction#244 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:44,514 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/6d9ff2776b7749fd97fc61b8d3f4f867 is 50, key is test_row_0/A:col10/1733801682923/Put/seqid=0 2024-12-10T03:34:44,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742112_1288 (size=12561) 2024-12-10T03:34:44,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742113_1289 (size=12561) 2024-12-10T03:34:44,523 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/6d9ff2776b7749fd97fc61b8d3f4f867 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/6d9ff2776b7749fd97fc61b8d3f4f867 2024-12-10T03:34:44,527 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 782122bcf84a1b8761da14e17014109c/A of 782122bcf84a1b8761da14e17014109c into 6d9ff2776b7749fd97fc61b8d3f4f867(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:44,527 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:44,527 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/A, priority=11, startTime=1733801684487; duration=0sec 2024-12-10T03:34:44,527 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:44,527 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:A 2024-12-10T03:34:44,527 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:34:44,529 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60695 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:34:44,529 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/C is initiating minor compaction (all files) 2024-12-10T03:34:44,529 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/C in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:44,529 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/b53132fe502741169c29e459c523a0f8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/ffb7ab777cd94d71882b089bb7f5b5cf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/10f35e9a499a4d63880e132bdc928440, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/5dfdf4a05c384efe9267660863a18337, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f19a334148544697945365832ff6c27f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=59.3 K 2024-12-10T03:34:44,529 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b53132fe502741169c29e459c523a0f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733801678433 2024-12-10T03:34:44,529 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffb7ab777cd94d71882b089bb7f5b5cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733801678497 2024-12-10T03:34:44,530 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10f35e9a499a4d63880e132bdc928440, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1733801680632 2024-12-10T03:34:44,530 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dfdf4a05c384efe9267660863a18337, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733801681796 2024-12-10T03:34:44,530 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting f19a334148544697945365832ff6c27f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733801682923 2024-12-10T03:34:44,538 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#C#compaction#245 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:44,539 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/377503f9f5f44dbf9943806b7fb2a2e9 is 50, key is test_row_0/C:col10/1733801682923/Put/seqid=0 2024-12-10T03:34:44,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742114_1290 (size=12561) 2024-12-10T03:34:44,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T03:34:44,634 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-10T03:34:44,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:44,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-10T03:34:44,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T03:34:44,636 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:44,636 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:44,636 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:44,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:44,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:34:44,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:44,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:44,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:44,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:44,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:44,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-10T03:34:44,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/9bda0244a916486e90e473c7ff441fba is 50, key is test_row_0/A:col10/1733801684712/Put/seqid=0 2024-12-10T03:34:44,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:44,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801744731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:44,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742115_1291 (size=14541) 2024-12-10T03:34:44,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T03:34:44,789 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:44,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:44,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:44,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
as already flushing 2024-12-10T03:34:44,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:44,790 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:44,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:44,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:44,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:44,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801744834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:44,921 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/ab84a2d48275426b9c9511b350b29072 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ab84a2d48275426b9c9511b350b29072 2024-12-10T03:34:44,925 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 782122bcf84a1b8761da14e17014109c/B of 782122bcf84a1b8761da14e17014109c into ab84a2d48275426b9c9511b350b29072(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:44,925 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:44,925 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/B, priority=11, startTime=1733801684487; duration=0sec 2024-12-10T03:34:44,925 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:44,925 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:B 2024-12-10T03:34:44,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T03:34:44,942 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:44,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:44,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:44,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:44,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:44,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:44,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:44,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:44,946 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/377503f9f5f44dbf9943806b7fb2a2e9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/377503f9f5f44dbf9943806b7fb2a2e9 2024-12-10T03:34:44,950 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 782122bcf84a1b8761da14e17014109c/C of 782122bcf84a1b8761da14e17014109c into 377503f9f5f44dbf9943806b7fb2a2e9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:44,950 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:44,950 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/C, priority=11, startTime=1733801684487; duration=0sec 2024-12-10T03:34:44,950 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:44,950 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:C 2024-12-10T03:34:45,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:45,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801745037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,094 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:45,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:45,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:45,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:45,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:45,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/9bda0244a916486e90e473c7ff441fba 2024-12-10T03:34:45,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/33cd6d5348ff410aa568f7f4186df915 is 50, key is test_row_0/B:col10/1733801684712/Put/seqid=0 2024-12-10T03:34:45,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742116_1292 (size=12151) 2024-12-10T03:34:45,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T03:34:45,246 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:45,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:45,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:45,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:45,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801745340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,399 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:45,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:45,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:45,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/33cd6d5348ff410aa568f7f4186df915 2024-12-10T03:34:45,551 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:45,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:45,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:45,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f7b61af0ca1742cca04af1ee6765572e is 50, key is test_row_0/C:col10/1733801684712/Put/seqid=0 2024-12-10T03:34:45,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742117_1293 (size=12151) 2024-12-10T03:34:45,704 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:45,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:45,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:45,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T03:34:45,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801745842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,856 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:45,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:45,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:45,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:45,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:45,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:45,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f7b61af0ca1742cca04af1ee6765572e 2024-12-10T03:34:45,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/9bda0244a916486e90e473c7ff441fba as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9bda0244a916486e90e473c7ff441fba 2024-12-10T03:34:45,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9bda0244a916486e90e473c7ff441fba, entries=200, sequenceid=203, filesize=14.2 K 2024-12-10T03:34:45,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/33cd6d5348ff410aa568f7f4186df915 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/33cd6d5348ff410aa568f7f4186df915 2024-12-10T03:34:45,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/33cd6d5348ff410aa568f7f4186df915, entries=150, sequenceid=203, filesize=11.9 K 2024-12-10T03:34:45,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/f7b61af0ca1742cca04af1ee6765572e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f7b61af0ca1742cca04af1ee6765572e 2024-12-10T03:34:45,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f7b61af0ca1742cca04af1ee6765572e, entries=150, sequenceid=203, filesize=11.9 K 2024-12-10T03:34:45,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 782122bcf84a1b8761da14e17014109c in 1261ms, sequenceid=203, compaction requested=false 2024-12-10T03:34:45,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:46,008 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:46,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T03:34:46,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:46,009 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:34:46,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:46,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:46,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:46,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:46,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:46,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:46,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/5ff06ae1813f4f69b400688167953163 is 50, key is test_row_0/A:col10/1733801684728/Put/seqid=0 2024-12-10T03:34:46,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742118_1294 (size=12151) 2024-12-10T03:34:46,416 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/5ff06ae1813f4f69b400688167953163 2024-12-10T03:34:46,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/2471f45757a94ae6b6948ca8e47b47e7 is 50, key is test_row_0/B:col10/1733801684728/Put/seqid=0 2024-12-10T03:34:46,455 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742119_1295 (size=12151) 2024-12-10T03:34:46,456 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/2471f45757a94ae6b6948ca8e47b47e7 2024-12-10T03:34:46,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/82eca143f0b14d2eb8b1da8705e863c5 is 50, key is test_row_0/C:col10/1733801684728/Put/seqid=0 2024-12-10T03:34:46,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742120_1296 (size=12151) 2024-12-10T03:34:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T03:34:46,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:46,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:46,864 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/82eca143f0b14d2eb8b1da8705e863c5 2024-12-10T03:34:46,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/5ff06ae1813f4f69b400688167953163 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/5ff06ae1813f4f69b400688167953163 2024-12-10T03:34:46,871 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/5ff06ae1813f4f69b400688167953163, entries=150, sequenceid=216, filesize=11.9 K 2024-12-10T03:34:46,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/2471f45757a94ae6b6948ca8e47b47e7 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2471f45757a94ae6b6948ca8e47b47e7 2024-12-10T03:34:46,875 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2471f45757a94ae6b6948ca8e47b47e7, entries=150, sequenceid=216, filesize=11.9 K 2024-12-10T03:34:46,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/82eca143f0b14d2eb8b1da8705e863c5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/82eca143f0b14d2eb8b1da8705e863c5 2024-12-10T03:34:46,880 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/82eca143f0b14d2eb8b1da8705e863c5, entries=150, sequenceid=216, filesize=11.9 K 2024-12-10T03:34:46,881 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=107.34 KB/109920 for 782122bcf84a1b8761da14e17014109c in 872ms, sequenceid=216, compaction requested=true 2024-12-10T03:34:46,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:46,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:46,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-10T03:34:46,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-10T03:34:46,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-10T03:34:46,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2460 sec 2024-12-10T03:34:46,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:46,884 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-10T03:34:46,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:46,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:46,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:46,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:46,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:46,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:46,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.2490 sec 2024-12-10T03:34:46,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/665d38d6479144f09fae1da9580cc187 is 50, key is test_row_0/A:col10/1733801686880/Put/seqid=0 2024-12-10T03:34:46,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742121_1297 (size=12151) 2024-12-10T03:34:46,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801746912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:47,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:47,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801747015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:47,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:47,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801747219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:47,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/665d38d6479144f09fae1da9580cc187 2024-12-10T03:34:47,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/715e02e574cb4dcc923929ce4c89c60e is 50, key is test_row_0/B:col10/1733801686880/Put/seqid=0 2024-12-10T03:34:47,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742122_1298 (size=12151) 2024-12-10T03:34:47,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:47,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801747520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:47,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/715e02e574cb4dcc923929ce4c89c60e 2024-12-10T03:34:47,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/30e57998249f469982cd658ed8d75be2 is 50, key is test_row_0/C:col10/1733801686880/Put/seqid=0 2024-12-10T03:34:47,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742123_1299 (size=12151) 2024-12-10T03:34:48,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:48,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801748022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:48,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/30e57998249f469982cd658ed8d75be2 2024-12-10T03:34:48,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/665d38d6479144f09fae1da9580cc187 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/665d38d6479144f09fae1da9580cc187 2024-12-10T03:34:48,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/665d38d6479144f09fae1da9580cc187, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T03:34:48,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/715e02e574cb4dcc923929ce4c89c60e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/715e02e574cb4dcc923929ce4c89c60e 2024-12-10T03:34:48,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/715e02e574cb4dcc923929ce4c89c60e, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T03:34:48,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/30e57998249f469982cd658ed8d75be2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/30e57998249f469982cd658ed8d75be2 2024-12-10T03:34:48,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/30e57998249f469982cd658ed8d75be2, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T03:34:48,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 782122bcf84a1b8761da14e17014109c in 1244ms, sequenceid=237, compaction requested=true 2024-12-10T03:34:48,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:48,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:48,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:48,129 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:48,129 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:48,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:48,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:48,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:48,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51404 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/A is initiating minor compaction (all files) 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/B is initiating minor compaction (all files) 2024-12-10T03:34:48,130 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/B in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
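The "Over memstore limit=512.0 K" warnings above come from HRegion.checkResources, which rejects writes with RegionTooBusyException once a region's memstore passes its blocking threshold, i.e. the configured flush size times the block multiplier. Below is a minimal Java sketch of that arithmetic using the standard configuration keys; the concrete values (128 KB flush size, multiplier 4) are assumptions chosen only to reproduce the 512 K figure reported in this log, not values read from the test harness.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed, illustrative values: 128 KB * 4 = 512 KB, the blocking limit reported above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;

        // Once the per-region memstore exceeds blockingLimit, puts fail fast with
        // RegionTooBusyException until a flush brings the memstore back under the limit.
        System.out.println("Per-region blocking memstore limit: " + (blockingLimit / 1024) + " K");
    }
}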
2024-12-10T03:34:48,130 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/A in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:48,130 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ab84a2d48275426b9c9511b350b29072, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/33cd6d5348ff410aa568f7f4186df915, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2471f45757a94ae6b6948ca8e47b47e7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/715e02e574cb4dcc923929ce4c89c60e] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=47.9 K 2024-12-10T03:34:48,130 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/6d9ff2776b7749fd97fc61b8d3f4f867, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9bda0244a916486e90e473c7ff441fba, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/5ff06ae1813f4f69b400688167953163, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/665d38d6479144f09fae1da9580cc187] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=50.2 K 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d9ff2776b7749fd97fc61b8d3f4f867, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733801682923 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ab84a2d48275426b9c9511b350b29072, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733801682923 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bda0244a916486e90e473c7ff441fba, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733801684091 2024-12-10T03:34:48,130 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 33cd6d5348ff410aa568f7f4186df915, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733801684091 2024-12-10T03:34:48,131 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5ff06ae1813f4f69b400688167953163, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733801684721 2024-12-10T03:34:48,131 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2471f45757a94ae6b6948ca8e47b47e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733801684721 2024-12-10T03:34:48,131 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 665d38d6479144f09fae1da9580cc187, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801686878 2024-12-10T03:34:48,131 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 715e02e574cb4dcc923929ce4c89c60e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801686878 2024-12-10T03:34:48,141 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#B#compaction#255 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:48,141 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/9cc2c5d632d34baf8a1418306d68da1f is 50, key is test_row_0/B:col10/1733801686880/Put/seqid=0 2024-12-10T03:34:48,145 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#A#compaction#256 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:48,145 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/4649dc792db24989a08776a04af8eead is 50, key is test_row_0/A:col10/1733801686880/Put/seqid=0 2024-12-10T03:34:48,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742124_1300 (size=12697) 2024-12-10T03:34:48,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742125_1301 (size=12697) 2024-12-10T03:34:48,566 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/9cc2c5d632d34baf8a1418306d68da1f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/9cc2c5d632d34baf8a1418306d68da1f 2024-12-10T03:34:48,569 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/4649dc792db24989a08776a04af8eead as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/4649dc792db24989a08776a04af8eead 2024-12-10T03:34:48,571 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 782122bcf84a1b8761da14e17014109c/B of 782122bcf84a1b8761da14e17014109c into 9cc2c5d632d34baf8a1418306d68da1f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:48,571 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:48,571 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/B, priority=12, startTime=1733801688129; duration=0sec 2024-12-10T03:34:48,571 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:48,571 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:B 2024-12-10T03:34:48,571 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:34:48,572 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:34:48,572 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/C is initiating minor compaction (all files) 2024-12-10T03:34:48,573 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/C in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:48,573 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/377503f9f5f44dbf9943806b7fb2a2e9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f7b61af0ca1742cca04af1ee6765572e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/82eca143f0b14d2eb8b1da8705e863c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/30e57998249f469982cd658ed8d75be2] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=47.9 K 2024-12-10T03:34:48,573 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 782122bcf84a1b8761da14e17014109c/A of 782122bcf84a1b8761da14e17014109c into 4649dc792db24989a08776a04af8eead(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:48,573 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:48,573 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/A, priority=12, startTime=1733801688128; duration=0sec 2024-12-10T03:34:48,573 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:48,573 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:A 2024-12-10T03:34:48,573 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 377503f9f5f44dbf9943806b7fb2a2e9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733801682923 2024-12-10T03:34:48,573 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f7b61af0ca1742cca04af1ee6765572e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733801684091 2024-12-10T03:34:48,573 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 82eca143f0b14d2eb8b1da8705e863c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733801684721 2024-12-10T03:34:48,574 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 30e57998249f469982cd658ed8d75be2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801686878 2024-12-10T03:34:48,580 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#C#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:48,580 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/3f4387db90d84b25b91f0e10c33c4009 is 50, key is test_row_0/C:col10/1733801686880/Put/seqid=0 2024-12-10T03:34:48,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742126_1302 (size=12697) 2024-12-10T03:34:48,589 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/3f4387db90d84b25b91f0e10c33c4009 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/3f4387db90d84b25b91f0e10c33c4009 2024-12-10T03:34:48,593 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 782122bcf84a1b8761da14e17014109c/C of 782122bcf84a1b8761da14e17014109c into 3f4387db90d84b25b91f0e10c33c4009(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:34:48,593 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:48,593 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/C, priority=12, startTime=1733801688129; duration=0sec 2024-12-10T03:34:48,593 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:48,593 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:C 2024-12-10T03:34:48,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T03:34:48,740 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-10T03:34:48,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:48,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-10T03:34:48,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T03:34:48,743 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:48,744 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:48,744 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:48,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T03:34:48,895 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:48,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-10T03:34:48,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:48,895 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T03:34:48,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:48,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:48,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:48,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:48,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:48,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:48,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/0fcf49e089084e9c9b05ea199c1ecc0c is 50, key is test_row_0/A:col10/1733801686907/Put/seqid=0 2024-12-10T03:34:48,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742127_1303 (size=12151) 2024-12-10T03:34:49,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:49,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T03:34:49,058 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:49,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801749057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:49,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:49,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801749159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:49,302 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/0fcf49e089084e9c9b05ea199c1ecc0c 2024-12-10T03:34:49,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/c034c8fe17cf4b68b0f46e8f9e39fe63 is 50, key is test_row_0/B:col10/1733801686907/Put/seqid=0 2024-12-10T03:34:49,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742128_1304 (size=12151) 2024-12-10T03:34:49,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T03:34:49,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:49,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801749362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:49,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:49,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801749665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:49,711 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/c034c8fe17cf4b68b0f46e8f9e39fe63 2024-12-10T03:34:49,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/37a935d46b604d40866ee475d702bb82 is 50, key is test_row_0/C:col10/1733801686907/Put/seqid=0 2024-12-10T03:34:49,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742129_1305 (size=12151) 2024-12-10T03:34:49,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T03:34:50,125 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/37a935d46b604d40866ee475d702bb82 2024-12-10T03:34:50,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/0fcf49e089084e9c9b05ea199c1ecc0c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0fcf49e089084e9c9b05ea199c1ecc0c 2024-12-10T03:34:50,131 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0fcf49e089084e9c9b05ea199c1ecc0c, entries=150, sequenceid=255, filesize=11.9 K 2024-12-10T03:34:50,132 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/c034c8fe17cf4b68b0f46e8f9e39fe63 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c034c8fe17cf4b68b0f46e8f9e39fe63 2024-12-10T03:34:50,134 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c034c8fe17cf4b68b0f46e8f9e39fe63, entries=150, sequenceid=255, filesize=11.9 K 2024-12-10T03:34:50,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/37a935d46b604d40866ee475d702bb82 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/37a935d46b604d40866ee475d702bb82 2024-12-10T03:34:50,138 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/37a935d46b604d40866ee475d702bb82, entries=150, sequenceid=255, filesize=11.9 K 2024-12-10T03:34:50,139 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 782122bcf84a1b8761da14e17014109c in 1244ms, sequenceid=255, compaction requested=false 2024-12-10T03:34:50,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:50,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
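The RegionTooBusyException warnings through this stretch show the same client connection (172.17.0.2:36748) having its Mutate calls rejected and reissued with increasing callIds until the flush drains the memstore. Below is a hypothetical client-side sketch of that pattern, an explicit retry loop around Table.put; in the test itself the HBase client's built-in retry machinery plays this role, and depending on retry settings the exception may surface wrapped rather than as a bare RegionTooBusyException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                      // assumed, illustrative backoff schedule
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                    // rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    // The region is refusing writes until the in-flight flush completes; back off and retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}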
2024-12-10T03:34:50,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-10T03:34:50,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-10T03:34:50,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-10T03:34:50,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3960 sec 2024-12-10T03:34:50,142 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.4000 sec 2024-12-10T03:34:50,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:50,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T03:34:50,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:50,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:50,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:50,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:50,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:50,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:50,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/f6ff11b918f94177985785eed746c909 is 50, key is test_row_0/A:col10/1733801690169/Put/seqid=0 2024-12-10T03:34:50,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742130_1306 (size=12301) 2024-12-10T03:34:50,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:50,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801750195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:50,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801750297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:50,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:50,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801750500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:50,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/f6ff11b918f94177985785eed746c909 2024-12-10T03:34:50,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/2bd903aef91a435ea808d3a4205ebf4f is 50, key is test_row_0/B:col10/1733801690169/Put/seqid=0 2024-12-10T03:34:50,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742131_1307 (size=12301) 2024-12-10T03:34:50,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:50,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801750803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:50,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T03:34:50,845 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-10T03:34:50,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:50,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-10T03:34:50,847 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:50,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T03:34:50,847 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:50,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T03:34:50,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/2bd903aef91a435ea808d3a4205ebf4f 2024-12-10T03:34:50,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/2d36c3e86c714f3c908b030e9b547edd is 50, key is test_row_0/C:col10/1733801690169/Put/seqid=0 2024-12-10T03:34:50,994 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742132_1308 (size=12301) 2024-12-10T03:34:50,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/2d36c3e86c714f3c908b030e9b547edd 2024-12-10T03:34:50,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/f6ff11b918f94177985785eed746c909 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/f6ff11b918f94177985785eed746c909 2024-12-10T03:34:50,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:50,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T03:34:50,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:50,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:50,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:50,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:50,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:51,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:51,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/f6ff11b918f94177985785eed746c909, entries=150, sequenceid=277, filesize=12.0 K 2024-12-10T03:34:51,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/2bd903aef91a435ea808d3a4205ebf4f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2bd903aef91a435ea808d3a4205ebf4f 2024-12-10T03:34:51,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2bd903aef91a435ea808d3a4205ebf4f, entries=150, sequenceid=277, filesize=12.0 K 2024-12-10T03:34:51,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/2d36c3e86c714f3c908b030e9b547edd as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/2d36c3e86c714f3c908b030e9b547edd 2024-12-10T03:34:51,010 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/2d36c3e86c714f3c908b030e9b547edd, entries=150, sequenceid=277, filesize=12.0 K 2024-12-10T03:34:51,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 782122bcf84a1b8761da14e17014109c in 842ms, sequenceid=277, compaction requested=true 2024-12-10T03:34:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:51,011 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:51,011 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:34:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:51,013 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:51,013 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:51,013 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/A is initiating minor compaction (all files) 2024-12-10T03:34:51,013 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/B is initiating minor compaction (all files) 2024-12-10T03:34:51,013 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/B in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:51,013 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/A in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:51,013 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/4649dc792db24989a08776a04af8eead, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0fcf49e089084e9c9b05ea199c1ecc0c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/f6ff11b918f94177985785eed746c909] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=36.3 K 2024-12-10T03:34:51,013 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/9cc2c5d632d34baf8a1418306d68da1f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c034c8fe17cf4b68b0f46e8f9e39fe63, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2bd903aef91a435ea808d3a4205ebf4f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=36.3 K 2024-12-10T03:34:51,013 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4649dc792db24989a08776a04af8eead, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801686878 2024-12-10T03:34:51,013 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cc2c5d632d34baf8a1418306d68da1f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801686878 2024-12-10T03:34:51,014 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c034c8fe17cf4b68b0f46e8f9e39fe63, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733801686907 2024-12-10T03:34:51,014 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fcf49e089084e9c9b05ea199c1ecc0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733801686907 2024-12-10T03:34:51,014 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bd903aef91a435ea808d3a4205ebf4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1733801689054 2024-12-10T03:34:51,014 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6ff11b918f94177985785eed746c909, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1733801689054 2024-12-10T03:34:51,019 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#B#compaction#264 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:51,020 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#A#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:51,020 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/1add28bfebf74e0ba892adf0d2f44d8c is 50, key is test_row_0/B:col10/1733801690169/Put/seqid=0 2024-12-10T03:34:51,020 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/01147a85c1864254b1c5337e36d7358e is 50, key is test_row_0/A:col10/1733801690169/Put/seqid=0 2024-12-10T03:34:51,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742134_1310 (size=12949) 2024-12-10T03:34:51,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742133_1309 (size=12949) 2024-12-10T03:34:51,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T03:34:51,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:51,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T03:34:51,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:51,151 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:34:51,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:51,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:51,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:51,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:51,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:51,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:51,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/7176368c53d94205834cee66499996f8 is 50, key is test_row_0/A:col10/1733801690182/Put/seqid=0 2024-12-10T03:34:51,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742135_1311 (size=12301) 2024-12-10T03:34:51,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. as already flushing 2024-12-10T03:34:51,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:51,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:51,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801751349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:51,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T03:34:51,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:51,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801751453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:51,457 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/1add28bfebf74e0ba892adf0d2f44d8c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/1add28bfebf74e0ba892adf0d2f44d8c 2024-12-10T03:34:51,457 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/01147a85c1864254b1c5337e36d7358e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/01147a85c1864254b1c5337e36d7358e 2024-12-10T03:34:51,462 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 782122bcf84a1b8761da14e17014109c/A of 782122bcf84a1b8761da14e17014109c into 01147a85c1864254b1c5337e36d7358e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:51,462 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:51,463 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/A, priority=13, startTime=1733801691011; duration=0sec 2024-12-10T03:34:51,463 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:34:51,463 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:A 2024-12-10T03:34:51,463 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:34:51,464 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:34:51,464 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 782122bcf84a1b8761da14e17014109c/C is initiating minor compaction (all files) 2024-12-10T03:34:51,464 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 782122bcf84a1b8761da14e17014109c/C in TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:51,464 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/3f4387db90d84b25b91f0e10c33c4009, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/37a935d46b604d40866ee475d702bb82, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/2d36c3e86c714f3c908b030e9b547edd] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp, totalSize=36.3 K 2024-12-10T03:34:51,465 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f4387db90d84b25b91f0e10c33c4009, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801686878 2024-12-10T03:34:51,465 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 782122bcf84a1b8761da14e17014109c/B of 782122bcf84a1b8761da14e17014109c into 1add28bfebf74e0ba892adf0d2f44d8c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:51,465 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37a935d46b604d40866ee475d702bb82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733801686907 2024-12-10T03:34:51,465 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:51,465 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/B, priority=13, startTime=1733801691011; duration=0sec 2024-12-10T03:34:51,465 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:51,465 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:B 2024-12-10T03:34:51,465 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d36c3e86c714f3c908b030e9b547edd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1733801689054 2024-12-10T03:34:51,471 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 782122bcf84a1b8761da14e17014109c#C#compaction#267 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:34:51,471 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/38d4c50d46194575b977e7bf634e4f85 is 50, key is test_row_0/C:col10/1733801690169/Put/seqid=0 2024-12-10T03:34:51,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742136_1312 (size=12949) 2024-12-10T03:34:51,558 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/7176368c53d94205834cee66499996f8 2024-12-10T03:34:51,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/45a595dd79c74de08c7574498f22605d is 50, key is test_row_0/B:col10/1733801690182/Put/seqid=0 2024-12-10T03:34:51,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742137_1313 (size=12301) 2024-12-10T03:34:51,567 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=291 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/45a595dd79c74de08c7574498f22605d 2024-12-10T03:34:51,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/a827edd7d93f49d8b73aed26aa17fd16 is 50, key is test_row_0/C:col10/1733801690182/Put/seqid=0 2024-12-10T03:34:51,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742138_1314 (size=12301) 2024-12-10T03:34:51,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801751657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:51,878 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/38d4c50d46194575b977e7bf634e4f85 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/38d4c50d46194575b977e7bf634e4f85 2024-12-10T03:34:51,882 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 782122bcf84a1b8761da14e17014109c/C of 782122bcf84a1b8761da14e17014109c into 38d4c50d46194575b977e7bf634e4f85(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:34:51,882 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:51,882 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c., storeName=782122bcf84a1b8761da14e17014109c/C, priority=13, startTime=1733801691011; duration=0sec 2024-12-10T03:34:51,883 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:51,883 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:C 2024-12-10T03:34:51,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T03:34:51,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36748 deadline: 1733801751959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:52,006 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/a827edd7d93f49d8b73aed26aa17fd16 2024-12-10T03:34:52,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/7176368c53d94205834cee66499996f8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/7176368c53d94205834cee66499996f8 2024-12-10T03:34:52,017 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/7176368c53d94205834cee66499996f8, entries=150, sequenceid=291, filesize=12.0 K 2024-12-10T03:34:52,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/45a595dd79c74de08c7574498f22605d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/45a595dd79c74de08c7574498f22605d 2024-12-10T03:34:52,020 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/45a595dd79c74de08c7574498f22605d, entries=150, sequenceid=291, filesize=12.0 K 2024-12-10T03:34:52,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/a827edd7d93f49d8b73aed26aa17fd16 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/a827edd7d93f49d8b73aed26aa17fd16 2024-12-10T03:34:52,024 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/a827edd7d93f49d8b73aed26aa17fd16, entries=150, sequenceid=291, filesize=12.0 K 2024-12-10T03:34:52,024 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 782122bcf84a1b8761da14e17014109c in 873ms, sequenceid=291, compaction requested=false 2024-12-10T03:34:52,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:52,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:52,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-10T03:34:52,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-10T03:34:52,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-10T03:34:52,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1780 sec 2024-12-10T03:34:52,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.1810 sec 2024-12-10T03:34:52,107 DEBUG [Thread-1173 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x521aad6f to 127.0.0.1:51621 2024-12-10T03:34:52,107 DEBUG [Thread-1171 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3677bd4f to 127.0.0.1:51621 2024-12-10T03:34:52,107 DEBUG [Thread-1171 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:52,107 DEBUG [Thread-1173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:52,108 DEBUG [Thread-1167 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:51621 2024-12-10T03:34:52,108 DEBUG [Thread-1167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:52,108 DEBUG [Thread-1165 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:51621 2024-12-10T03:34:52,108 DEBUG [Thread-1165 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:52,109 DEBUG [Thread-1169 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:51621 
2024-12-10T03:34:52,109 DEBUG [Thread-1169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:52,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:52,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T03:34:52,470 DEBUG [Thread-1158 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:51621 2024-12-10T03:34:52,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:52,470 DEBUG [Thread-1158 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:52,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:52,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:52,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:52,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:52,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:52,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/2950a10625804cebb284280d88a75fb9 is 50, key is test_row_0/A:col10/1733801691326/Put/seqid=0 2024-12-10T03:34:52,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742139_1315 (size=12301) 2024-12-10T03:34:52,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/2950a10625804cebb284280d88a75fb9 2024-12-10T03:34:52,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/c3b205478b5d426a971e8793b0ece487 is 50, key is test_row_0/B:col10/1733801691326/Put/seqid=0 2024-12-10T03:34:52,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742140_1316 (size=12301) 2024-12-10T03:34:52,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T03:34:52,951 INFO [Thread-1164 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-10T03:34:53,076 DEBUG [Thread-1160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fef31f8 to 127.0.0.1:51621 2024-12-10T03:34:53,076 DEBUG [Thread-1162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x0eb04aeb to 127.0.0.1:51621 2024-12-10T03:34:53,076 DEBUG [Thread-1160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:53,076 DEBUG [Thread-1162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:53,129 DEBUG [Thread-1156 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:51621 2024-12-10T03:34:53,129 DEBUG [Thread-1156 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:53,141 DEBUG [Thread-1154 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:51621 2024-12-10T03:34:53,141 DEBUG [Thread-1154 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:53,141 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-10T03:34:53,141 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 14 2024-12-10T03:34:53,141 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17 2024-12-10T03:34:53,141 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 194 2024-12-10T03:34:53,141 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16 2024-12-10T03:34:53,141 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13 2024-12-10T03:34:53,142 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T03:34:53,142 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9744 2024-12-10T03:34:53,142 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9622 2024-12-10T03:34:53,142 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9434 2024-12-10T03:34:53,142 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9724 2024-12-10T03:34:53,142 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9617 2024-12-10T03:34:53,142 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T03:34:53,142 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T03:34:53,142 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:51621 2024-12-10T03:34:53,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:34:53,143 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T03:34:53,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T03:34:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T03:34:53,147 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801693147"}]},"ts":"1733801693147"} 2024-12-10T03:34:53,149 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T03:34:53,167 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T03:34:53,168 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:34:53,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=782122bcf84a1b8761da14e17014109c, UNASSIGN}] 2024-12-10T03:34:53,171 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=85, ppid=84, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=782122bcf84a1b8761da14e17014109c, UNASSIGN 2024-12-10T03:34:53,172 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=85 updating hbase:meta row=782122bcf84a1b8761da14e17014109c, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:53,173 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:34:53,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; CloseRegionProcedure 782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T03:34:53,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/c3b205478b5d426a971e8793b0ece487 2024-12-10T03:34:53,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/48b9a9a97c78487cb983fd72007b0047 is 50, key is test_row_0/C:col10/1733801691326/Put/seqid=0 2024-12-10T03:34:53,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742141_1317 (size=12301) 2024-12-10T03:34:53,325 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:53,325 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(124): Close 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:53,325 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:34:53,325 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1681): Closing 782122bcf84a1b8761da14e17014109c, disabling compactions & flushes 2024-12-10T03:34:53,326 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T03:34:53,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/48b9a9a97c78487cb983fd72007b0047 2024-12-10T03:34:53,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/2950a10625804cebb284280d88a75fb9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/2950a10625804cebb284280d88a75fb9 2024-12-10T03:34:53,727 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/2950a10625804cebb284280d88a75fb9, entries=150, sequenceid=317, filesize=12.0 K 2024-12-10T03:34:53,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/c3b205478b5d426a971e8793b0ece487 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c3b205478b5d426a971e8793b0ece487 2024-12-10T03:34:53,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c3b205478b5d426a971e8793b0ece487, entries=150, sequenceid=317, filesize=12.0 K 2024-12-10T03:34:53,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/48b9a9a97c78487cb983fd72007b0047 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/48b9a9a97c78487cb983fd72007b0047 2024-12-10T03:34:53,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/48b9a9a97c78487cb983fd72007b0047, entries=150, sequenceid=317, filesize=12.0 K 2024-12-10T03:34:53,739 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=26.84 KB/27480 for 782122bcf84a1b8761da14e17014109c in 1270ms, sequenceid=317, compaction requested=true 2024-12-10T03:34:53,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:53,740 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:53,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. after waiting 0 ms 2024-12-10T03:34:53,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 2024-12-10T03:34:53,740 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. because compaction request was cancelled 2024-12-10T03:34:53,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:53,740 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:A 2024-12-10T03:34:53,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:53,740 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. because compaction request was cancelled 2024-12-10T03:34:53,740 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(2837): Flushing 782122bcf84a1b8761da14e17014109c 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T03:34:53,740 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:B 2024-12-10T03:34:53,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 782122bcf84a1b8761da14e17014109c:C, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:34:53,740 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
because compaction request was cancelled 2024-12-10T03:34:53,740 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 782122bcf84a1b8761da14e17014109c:C 2024-12-10T03:34:53,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=A 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=B 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 782122bcf84a1b8761da14e17014109c, store=C 2024-12-10T03:34:53,740 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:53,744 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/844a9d5aa899467390182459ce999c0e is 50, key is test_row_0/A:col10/1733801693140/Put/seqid=0 2024-12-10T03:34:53,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742142_1318 (size=9857) 2024-12-10T03:34:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T03:34:54,150 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/844a9d5aa899467390182459ce999c0e 2024-12-10T03:34:54,162 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/609fefe75d3249ecb568b80a395e6c64 is 50, key is test_row_0/B:col10/1733801693140/Put/seqid=0 2024-12-10T03:34:54,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742143_1319 (size=9857) 2024-12-10T03:34:54,253 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T03:34:54,567 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/609fefe75d3249ecb568b80a395e6c64 2024-12-10T03:34:54,582 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/be290cd902544598bc56edd748133d8e is 50, key is test_row_0/C:col10/1733801693140/Put/seqid=0 2024-12-10T03:34:54,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742144_1320 (size=9857) 2024-12-10T03:34:54,987 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/be290cd902544598bc56edd748133d8e 2024-12-10T03:34:54,999 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/A/844a9d5aa899467390182459ce999c0e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/844a9d5aa899467390182459ce999c0e 2024-12-10T03:34:55,005 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/844a9d5aa899467390182459ce999c0e, entries=100, sequenceid=324, filesize=9.6 K 2024-12-10T03:34:55,006 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/B/609fefe75d3249ecb568b80a395e6c64 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/609fefe75d3249ecb568b80a395e6c64 2024-12-10T03:34:55,011 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/609fefe75d3249ecb568b80a395e6c64, entries=100, sequenceid=324, filesize=9.6 K 2024-12-10T03:34:55,012 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/.tmp/C/be290cd902544598bc56edd748133d8e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/be290cd902544598bc56edd748133d8e 2024-12-10T03:34:55,017 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/be290cd902544598bc56edd748133d8e, entries=100, sequenceid=324, filesize=9.6 K 2024-12-10T03:34:55,018 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 782122bcf84a1b8761da14e17014109c in 1278ms, sequenceid=324, compaction requested=true 2024-12-10T03:34:55,018 DEBUG [StoreCloser-TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1b38bc85273743b7828122d04940edc5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/db57c1cd6c1f4af3af45040bfbe8bee6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/c3a8cb37870c4ae99cae31310496cfe8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9e98016549944a14b6e67953a8bbf2a4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/8745a3902fea4f048e98d44f9d864926, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0832cc39a509449dbbd0647bd81558d5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1bfd4e8ce2904a84a23916a2d29f6f36, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/737e9fc519b1442284e3e2ae14c3c978, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/b2154635b82944dc9b6a7c71db33244e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/86cd7fcb0a414bb8a40c2ce553d68173, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/12e95abfb0ba4cde837cdc4c5b3efede, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/e1e8d03531ad42daa5546546ec800cda, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/6d9ff2776b7749fd97fc61b8d3f4f867, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9bda0244a916486e90e473c7ff441fba, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/5ff06ae1813f4f69b400688167953163, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/4649dc792db24989a08776a04af8eead, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/665d38d6479144f09fae1da9580cc187, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0fcf49e089084e9c9b05ea199c1ecc0c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/f6ff11b918f94177985785eed746c909] to archive 2024-12-10T03:34:55,020 DEBUG [StoreCloser-TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:34:55,022 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/db57c1cd6c1f4af3af45040bfbe8bee6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/db57c1cd6c1f4af3af45040bfbe8bee6 2024-12-10T03:34:55,022 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1b38bc85273743b7828122d04940edc5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1b38bc85273743b7828122d04940edc5 2024-12-10T03:34:55,022 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/c3a8cb37870c4ae99cae31310496cfe8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/c3a8cb37870c4ae99cae31310496cfe8 2024-12-10T03:34:55,023 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/8745a3902fea4f048e98d44f9d864926 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/8745a3902fea4f048e98d44f9d864926 2024-12-10T03:34:55,023 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0832cc39a509449dbbd0647bd81558d5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0832cc39a509449dbbd0647bd81558d5 2024-12-10T03:34:55,023 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9e98016549944a14b6e67953a8bbf2a4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9e98016549944a14b6e67953a8bbf2a4 2024-12-10T03:34:55,023 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/737e9fc519b1442284e3e2ae14c3c978 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/737e9fc519b1442284e3e2ae14c3c978 2024-12-10T03:34:55,023 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1bfd4e8ce2904a84a23916a2d29f6f36 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/1bfd4e8ce2904a84a23916a2d29f6f36 2024-12-10T03:34:55,025 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/b2154635b82944dc9b6a7c71db33244e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/b2154635b82944dc9b6a7c71db33244e 2024-12-10T03:34:55,025 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/6d9ff2776b7749fd97fc61b8d3f4f867 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/6d9ff2776b7749fd97fc61b8d3f4f867 2024-12-10T03:34:55,025 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/e1e8d03531ad42daa5546546ec800cda to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/e1e8d03531ad42daa5546546ec800cda 2024-12-10T03:34:55,025 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/5ff06ae1813f4f69b400688167953163 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/5ff06ae1813f4f69b400688167953163 2024-12-10T03:34:55,025 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/86cd7fcb0a414bb8a40c2ce553d68173 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/86cd7fcb0a414bb8a40c2ce553d68173 2024-12-10T03:34:55,025 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9bda0244a916486e90e473c7ff441fba to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/9bda0244a916486e90e473c7ff441fba 2024-12-10T03:34:55,026 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/12e95abfb0ba4cde837cdc4c5b3efede to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/12e95abfb0ba4cde837cdc4c5b3efede 2024-12-10T03:34:55,026 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/4649dc792db24989a08776a04af8eead to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/4649dc792db24989a08776a04af8eead 2024-12-10T03:34:55,026 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/f6ff11b918f94177985785eed746c909 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/f6ff11b918f94177985785eed746c909 2024-12-10T03:34:55,027 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/665d38d6479144f09fae1da9580cc187 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/665d38d6479144f09fae1da9580cc187 2024-12-10T03:34:55,027 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0fcf49e089084e9c9b05ea199c1ecc0c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/0fcf49e089084e9c9b05ea199c1ecc0c 2024-12-10T03:34:55,028 DEBUG [StoreCloser-TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/29605a8053614a7d880c7b8bbd203e64, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/49b6fc6a00f747c9a56adfd0b4429eff, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7351a3133450422590b906367b928a25, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/cd00d83b691e474fabe13c68a8a4bfe7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/f1a0a1252d08406baa338e47ad1ddba0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/4c2b214a0a1f44a6864a813121ba23d8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/93d49526de2c4dd38b0589c1ebd518c8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/b27a6f80cca84580bb8df4e0b092a743, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/89edc3e2bc074173bae72decdcf9adb8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/6bd27408c66f44c48a64f53d27afe175, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ce554981aed54611949ee2218b77ef6e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ab84a2d48275426b9c9511b350b29072, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7829ef488b1349849ac0704e423baf36, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/33cd6d5348ff410aa568f7f4186df915, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2471f45757a94ae6b6948ca8e47b47e7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/9cc2c5d632d34baf8a1418306d68da1f, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/715e02e574cb4dcc923929ce4c89c60e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c034c8fe17cf4b68b0f46e8f9e39fe63, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2bd903aef91a435ea808d3a4205ebf4f] to archive 2024-12-10T03:34:55,028 DEBUG [StoreCloser-TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:34:55,030 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/29605a8053614a7d880c7b8bbd203e64 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/29605a8053614a7d880c7b8bbd203e64 2024-12-10T03:34:55,030 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/49b6fc6a00f747c9a56adfd0b4429eff to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/49b6fc6a00f747c9a56adfd0b4429eff 2024-12-10T03:34:55,030 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/cd00d83b691e474fabe13c68a8a4bfe7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/cd00d83b691e474fabe13c68a8a4bfe7 2024-12-10T03:34:55,030 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/f1a0a1252d08406baa338e47ad1ddba0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/f1a0a1252d08406baa338e47ad1ddba0 2024-12-10T03:34:55,031 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/4c2b214a0a1f44a6864a813121ba23d8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/4c2b214a0a1f44a6864a813121ba23d8 2024-12-10T03:34:55,031 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/93d49526de2c4dd38b0589c1ebd518c8 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/93d49526de2c4dd38b0589c1ebd518c8 2024-12-10T03:34:55,031 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7351a3133450422590b906367b928a25 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7351a3133450422590b906367b928a25 2024-12-10T03:34:55,031 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/b27a6f80cca84580bb8df4e0b092a743 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/b27a6f80cca84580bb8df4e0b092a743 2024-12-10T03:34:55,032 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/89edc3e2bc074173bae72decdcf9adb8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/89edc3e2bc074173bae72decdcf9adb8 2024-12-10T03:34:55,032 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/6bd27408c66f44c48a64f53d27afe175 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/6bd27408c66f44c48a64f53d27afe175 2024-12-10T03:34:55,032 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ce554981aed54611949ee2218b77ef6e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ce554981aed54611949ee2218b77ef6e 2024-12-10T03:34:55,033 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7829ef488b1349849ac0704e423baf36 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/7829ef488b1349849ac0704e423baf36 2024-12-10T03:34:55,033 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ab84a2d48275426b9c9511b350b29072 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/ab84a2d48275426b9c9511b350b29072 2024-12-10T03:34:55,033 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/33cd6d5348ff410aa568f7f4186df915 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/33cd6d5348ff410aa568f7f4186df915 2024-12-10T03:34:55,033 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2471f45757a94ae6b6948ca8e47b47e7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2471f45757a94ae6b6948ca8e47b47e7 2024-12-10T03:34:55,033 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/9cc2c5d632d34baf8a1418306d68da1f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/9cc2c5d632d34baf8a1418306d68da1f 2024-12-10T03:34:55,034 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/715e02e574cb4dcc923929ce4c89c60e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/715e02e574cb4dcc923929ce4c89c60e 2024-12-10T03:34:55,034 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2bd903aef91a435ea808d3a4205ebf4f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/2bd903aef91a435ea808d3a4205ebf4f 2024-12-10T03:34:55,034 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c034c8fe17cf4b68b0f46e8f9e39fe63 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c034c8fe17cf4b68b0f46e8f9e39fe63 2024-12-10T03:34:55,035 DEBUG [StoreCloser-TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f0b0c18925ef4a4590f547a054a7f40b, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/09ae9e4aa6b04262b88ca2c0c83cdfa3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/eadaba43668643ddbe0e5281e5fd047c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/79f9778078174465ab3a755ef5ec095f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/19166a677a304cc7b7f7f89214630e3e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0d36721194af439f9bfadb425d899270, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/b53132fe502741169c29e459c523a0f8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0ca79440029543908a55e5863e074497, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/ffb7ab777cd94d71882b089bb7f5b5cf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/10f35e9a499a4d63880e132bdc928440, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/5dfdf4a05c384efe9267660863a18337, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/377503f9f5f44dbf9943806b7fb2a2e9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f19a334148544697945365832ff6c27f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f7b61af0ca1742cca04af1ee6765572e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/82eca143f0b14d2eb8b1da8705e863c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/3f4387db90d84b25b91f0e10c33c4009, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/30e57998249f469982cd658ed8d75be2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/37a935d46b604d40866ee475d702bb82, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/2d36c3e86c714f3c908b030e9b547edd] to archive 2024-12-10T03:34:55,036 DEBUG [StoreCloser-TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:34:55,038 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f0b0c18925ef4a4590f547a054a7f40b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f0b0c18925ef4a4590f547a054a7f40b 2024-12-10T03:34:55,038 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/79f9778078174465ab3a755ef5ec095f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/79f9778078174465ab3a755ef5ec095f 2024-12-10T03:34:55,038 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/09ae9e4aa6b04262b88ca2c0c83cdfa3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/09ae9e4aa6b04262b88ca2c0c83cdfa3 2024-12-10T03:34:55,038 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/eadaba43668643ddbe0e5281e5fd047c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/eadaba43668643ddbe0e5281e5fd047c 2024-12-10T03:34:55,038 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/19166a677a304cc7b7f7f89214630e3e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/19166a677a304cc7b7f7f89214630e3e 2024-12-10T03:34:55,038 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0ca79440029543908a55e5863e074497 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0ca79440029543908a55e5863e074497 2024-12-10T03:34:55,038 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/b53132fe502741169c29e459c523a0f8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/b53132fe502741169c29e459c523a0f8 2024-12-10T03:34:55,039 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0d36721194af439f9bfadb425d899270 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/0d36721194af439f9bfadb425d899270 2024-12-10T03:34:55,039 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/ffb7ab777cd94d71882b089bb7f5b5cf to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/ffb7ab777cd94d71882b089bb7f5b5cf 2024-12-10T03:34:55,039 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/10f35e9a499a4d63880e132bdc928440 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/10f35e9a499a4d63880e132bdc928440 2024-12-10T03:34:55,039 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/5dfdf4a05c384efe9267660863a18337 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/5dfdf4a05c384efe9267660863a18337 2024-12-10T03:34:55,040 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/377503f9f5f44dbf9943806b7fb2a2e9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/377503f9f5f44dbf9943806b7fb2a2e9 2024-12-10T03:34:55,040 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/82eca143f0b14d2eb8b1da8705e863c5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/82eca143f0b14d2eb8b1da8705e863c5 2024-12-10T03:34:55,040 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f7b61af0ca1742cca04af1ee6765572e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f7b61af0ca1742cca04af1ee6765572e 2024-12-10T03:34:55,040 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f19a334148544697945365832ff6c27f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/f19a334148544697945365832ff6c27f 2024-12-10T03:34:55,040 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/3f4387db90d84b25b91f0e10c33c4009 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/3f4387db90d84b25b91f0e10c33c4009 2024-12-10T03:34:55,041 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/2d36c3e86c714f3c908b030e9b547edd to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/2d36c3e86c714f3c908b030e9b547edd 2024-12-10T03:34:55,041 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/30e57998249f469982cd658ed8d75be2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/30e57998249f469982cd658ed8d75be2 2024-12-10T03:34:55,041 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/37a935d46b604d40866ee475d702bb82 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/37a935d46b604d40866ee475d702bb82 2024-12-10T03:34:55,044 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/recovered.edits/327.seqid, newMaxSeqId=327, maxSeqId=1 2024-12-10T03:34:55,044 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c. 
2024-12-10T03:34:55,044 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1635): Region close journal for 782122bcf84a1b8761da14e17014109c: 2024-12-10T03:34:55,045 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(170): Closed 782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:55,046 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=85 updating hbase:meta row=782122bcf84a1b8761da14e17014109c, regionState=CLOSED 2024-12-10T03:34:55,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-10T03:34:55,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; CloseRegionProcedure 782122bcf84a1b8761da14e17014109c, server=50b9ef1c5472,37553,1733801610862 in 1.8730 sec 2024-12-10T03:34:55,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=84 2024-12-10T03:34:55,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=84, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=782122bcf84a1b8761da14e17014109c, UNASSIGN in 1.8770 sec 2024-12-10T03:34:55,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-10T03:34:55,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8810 sec 2024-12-10T03:34:55,051 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801695050"}]},"ts":"1733801695050"} 2024-12-10T03:34:55,051 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T03:34:55,083 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T03:34:55,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9410 sec 2024-12-10T03:34:55,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T03:34:55,256 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-10T03:34:55,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T03:34:55,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:55,261 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=87, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:55,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T03:34:55,263 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=87, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:55,266 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:55,269 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/recovered.edits] 2024-12-10T03:34:55,273 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/01147a85c1864254b1c5337e36d7358e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/01147a85c1864254b1c5337e36d7358e 2024-12-10T03:34:55,273 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/2950a10625804cebb284280d88a75fb9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/2950a10625804cebb284280d88a75fb9 2024-12-10T03:34:55,273 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/844a9d5aa899467390182459ce999c0e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/844a9d5aa899467390182459ce999c0e 2024-12-10T03:34:55,273 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/7176368c53d94205834cee66499996f8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/A/7176368c53d94205834cee66499996f8 2024-12-10T03:34:55,276 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/1add28bfebf74e0ba892adf0d2f44d8c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/1add28bfebf74e0ba892adf0d2f44d8c 2024-12-10T03:34:55,276 
DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/45a595dd79c74de08c7574498f22605d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/45a595dd79c74de08c7574498f22605d 2024-12-10T03:34:55,276 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/609fefe75d3249ecb568b80a395e6c64 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/609fefe75d3249ecb568b80a395e6c64 2024-12-10T03:34:55,276 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c3b205478b5d426a971e8793b0ece487 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/B/c3b205478b5d426a971e8793b0ece487 2024-12-10T03:34:55,279 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/48b9a9a97c78487cb983fd72007b0047 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/48b9a9a97c78487cb983fd72007b0047 2024-12-10T03:34:55,280 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/be290cd902544598bc56edd748133d8e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/be290cd902544598bc56edd748133d8e 2024-12-10T03:34:55,280 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/38d4c50d46194575b977e7bf634e4f85 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/38d4c50d46194575b977e7bf634e4f85 2024-12-10T03:34:55,280 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/a827edd7d93f49d8b73aed26aa17fd16 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/C/a827edd7d93f49d8b73aed26aa17fd16 2024-12-10T03:34:55,282 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/recovered.edits/327.seqid to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c/recovered.edits/327.seqid 2024-12-10T03:34:55,283 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/782122bcf84a1b8761da14e17014109c 2024-12-10T03:34:55,283 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T03:34:55,285 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=87, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:55,290 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T03:34:55,292 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T03:34:55,293 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=87, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:55,293 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T03:34:55,294 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733801695293"}]},"ts":"9223372036854775807"} 2024-12-10T03:34:55,295 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T03:34:55,295 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 782122bcf84a1b8761da14e17014109c, NAME => 'TestAcidGuarantees,,1733801670874.782122bcf84a1b8761da14e17014109c.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T03:34:55,295 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T03:34:55,295 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733801695295"}]},"ts":"9223372036854775807"} 2024-12-10T03:34:55,296 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T03:34:55,300 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=87, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:55,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 42 msec 2024-12-10T03:34:55,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-10T03:34:55,365 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-10T03:34:55,379 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=247 (was 248), OpenFileDescriptor=459 (was 466), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=281 (was 279) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3321 (was 3349) 2024-12-10T03:34:55,390 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=247, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=281, ProcessCount=11, AvailableMemoryMB=3320 2024-12-10T03:34:55,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
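
The DISABLE (procId 83) and DELETE (procId 87) operations completed above are driven from the test client through the Admin API; below is a minimal sketch of that client-side call sequence, assuming the HBase 2.x Admin interface (the class name DropTable is illustrative, not from the log).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTable {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            admin.disableTable(table);  // master runs a DisableTableProcedure
            admin.deleteTable(table);   // master archives the HFiles, then cleans hbase:meta
          }
        }
      }
    }
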
2024-12-10T03:34:55,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:34:55,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:55,394 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T03:34:55,394 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:55,394 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 88 2024-12-10T03:34:55,395 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T03:34:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-10T03:34:55,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742145_1321 (size=963) 2024-12-10T03:34:55,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-10T03:34:55,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-10T03:34:55,807 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:34:55,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742146_1322 (size=53) 2024-12-10T03:34:56,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-10T03:34:56,220 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:56,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 419a7ce5f33ca468fbba7b43cce2b2ec, disabling compactions & flushes 2024-12-10T03:34:56,221 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:56,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:56,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. after waiting 0 ms 2024-12-10T03:34:56,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:56,221 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
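
The CreateTableProcedure (pid=88) logged above carries the full table descriptor: families A, B and C with VERSIONS => '1' and 64 KB blocks, the table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute, and the small 131072-byte flush size that triggers the MEMSTORE_FLUSHSIZE warning. A minimal sketch of how such a descriptor could be built with the HBase 2.x client API follows (the class name CreateAdaptiveTable is illustrative, not from the log).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata, as logged: adaptive in-memory compaction
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
              .setMemStoreFlushSize(131072);   // 128 KB, hence the TableDescriptorChecker warning
          for (String cf : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(cf))
                .setMaxVersions(1)             // VERSIONS => '1'
                .setBlocksize(65536)           // BLOCKSIZE => '65536'
                .build());
          }
          admin.createTable(table.build());
        }
      }
    }
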
2024-12-10T03:34:56,221 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:34:56,224 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T03:34:56,224 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733801696224"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733801696224"}]},"ts":"1733801696224"} 2024-12-10T03:34:56,227 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T03:34:56,228 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T03:34:56,228 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801696228"}]},"ts":"1733801696228"} 2024-12-10T03:34:56,229 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T03:34:56,276 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, ASSIGN}] 2024-12-10T03:34:56,277 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, ASSIGN 2024-12-10T03:34:56,279 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, ASSIGN; state=OFFLINE, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=false 2024-12-10T03:34:56,430 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:56,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; OpenRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:56,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-10T03:34:56,587 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:56,594 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:34:56,594 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7285): Opening region: {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:34:56,595 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,595 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:56,595 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7327): checking encryption for 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,595 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(7330): checking classloading for 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,598 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,599 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:56,600 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 419a7ce5f33ca468fbba7b43cce2b2ec columnFamilyName A 2024-12-10T03:34:56,600 DEBUG [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:56,600 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(327): Store=419a7ce5f33ca468fbba7b43cce2b2ec/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:56,600 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,602 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:56,602 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 419a7ce5f33ca468fbba7b43cce2b2ec columnFamilyName B 2024-12-10T03:34:56,602 DEBUG [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:56,602 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(327): Store=419a7ce5f33ca468fbba7b43cce2b2ec/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:56,602 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,604 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:56,604 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 419a7ce5f33ca468fbba7b43cce2b2ec columnFamilyName C 2024-12-10T03:34:56,604 DEBUG [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:56,604 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(327): Store=419a7ce5f33ca468fbba7b43cce2b2ec/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:56,605 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:56,605 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,606 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,608 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:34:56,609 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1085): writing seq id for 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:56,612 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:34:56,613 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1102): Opened 419a7ce5f33ca468fbba7b43cce2b2ec; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71601216, jitterRate=0.0669412612915039}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:34:56,614 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegion(1001): Region open journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:34:56,614 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., pid=90, masterSystemTime=1733801696586 2024-12-10T03:34:56,616 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:56,616 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=90}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:34:56,617 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:56,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-10T03:34:56,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; OpenRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 in 185 msec 2024-12-10T03:34:56,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-12-10T03:34:56,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, ASSIGN in 345 msec 2024-12-10T03:34:56,623 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T03:34:56,624 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801696624"}]},"ts":"1733801696624"} 2024-12-10T03:34:56,625 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T03:34:56,643 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=88, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T03:34:56,645 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2510 sec 2024-12-10T03:34:57,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-10T03:34:57,505 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 88 completed 2024-12-10T03:34:57,508 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51f7d511 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75b14fbd 2024-12-10T03:34:57,544 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b6cf8cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:57,547 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:57,549 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:57,551 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T03:34:57,552 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49084, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T03:34:57,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T03:34:57,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:34:57,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T03:34:57,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742147_1323 (size=999) 2024-12-10T03:34:57,969 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T03:34:57,969 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T03:34:57,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:34:57,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, REOPEN/MOVE}] 2024-12-10T03:34:57,980 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, REOPEN/MOVE 2024-12-10T03:34:57,981 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:57,982 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:34:57,982 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:58,134 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,135 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,135 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:34:58,135 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing 419a7ce5f33ca468fbba7b43cce2b2ec, disabling compactions & flushes 2024-12-10T03:34:58,135 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,135 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,135 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. after waiting 0 ms 2024-12-10T03:34:58,135 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
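
The ModifyTableProcedure (pid=91) stored above rewrites the descriptor so that family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'); the master then reopens the region, as the CLOSE/OPEN entries around this point show. A minimal sketch of the equivalent client-side modification, assuming the HBase 2.x Admin API (the class and method names are illustrative, not from the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      // Turn column family 'A' of TestAcidGuarantees into a MOB family with a
      // 4-byte threshold, leaving the rest of the descriptor unchanged.
      public static void enableMob(Admin admin) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(name);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                .setMobEnabled(true)     // IS_MOB => 'true'
                .setMobThreshold(4L)     // MOB_THRESHOLD => '4' (bytes)
                .build())
            .build();
        admin.modifyTable(modified);     // master reopens the table's regions
      }
    }
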
2024-12-10T03:34:58,141 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T03:34:58,142 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,142 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:34:58,142 WARN [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionServer(3786): Not adding moved region record: 419a7ce5f33ca468fbba7b43cce2b2ec to self. 2024-12-10T03:34:58,143 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,143 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=CLOSED 2024-12-10T03:34:58,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-10T03:34:58,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 in 162 msec 2024-12-10T03:34:58,146 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, REOPEN/MOVE; state=CLOSED, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=true 2024-12-10T03:34:58,297 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=93, state=RUNNABLE; OpenRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:34:58,452 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,457 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:34:58,457 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7285): Opening region: {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:34:58,458 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,458 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:34:58,459 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7327): checking encryption for 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,459 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(7330): checking classloading for 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,461 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,462 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:58,463 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 419a7ce5f33ca468fbba7b43cce2b2ec columnFamilyName A 2024-12-10T03:34:58,465 DEBUG [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:58,466 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(327): Store=419a7ce5f33ca468fbba7b43cce2b2ec/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:58,466 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,467 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:58,467 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 419a7ce5f33ca468fbba7b43cce2b2ec columnFamilyName B 2024-12-10T03:34:58,467 DEBUG [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:58,468 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(327): Store=419a7ce5f33ca468fbba7b43cce2b2ec/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:58,468 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,468 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:34:58,469 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 419a7ce5f33ca468fbba7b43cce2b2ec columnFamilyName C 2024-12-10T03:34:58,469 DEBUG [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:58,469 INFO [StoreOpener-419a7ce5f33ca468fbba7b43cce2b2ec-1 {}] regionserver.HStore(327): Store=419a7ce5f33ca468fbba7b43cce2b2ec/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:34:58,469 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,470 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,471 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,473 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:34:58,474 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1085): writing seq id for 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,475 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1102): Opened 419a7ce5f33ca468fbba7b43cce2b2ec; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68537169, jitterRate=0.021283403038978577}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:34:58,475 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegion(1001): Region open journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:34:58,476 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., pid=95, masterSystemTime=1733801698452 2024-12-10T03:34:58,477 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,477 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=95}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:34:58,477 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=OPEN, openSeqNum=5, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-12-10T03:34:58,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; OpenRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 in 179 msec 2024-12-10T03:34:58,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-10T03:34:58,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, REOPEN/MOVE in 501 msec 2024-12-10T03:34:58,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-10T03:34:58,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 508 msec 2024-12-10T03:34:58,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 927 msec 2024-12-10T03:34:58,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-10T03:34:58,487 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc42ea6 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62f74604 2024-12-10T03:34:58,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec15031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,545 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x117e86d9 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49e13594 2024-12-10T03:34:58,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd5b441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,561 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cd96549 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c54a0d3 2024-12-10T03:34:58,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c336ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,570 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31aea41b to 
127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3875c8c5 2024-12-10T03:34:58,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f94d721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-12-10T03:34:58,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,585 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27539bdc to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c907e21 2024-12-10T03:34:58,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683f8469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,594 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e3203d9 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61ec0f48 2024-12-10T03:34:58,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e4d3d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,602 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x798e7fd4 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7819b9e2 2024-12-10T03:34:58,610 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b308f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,611 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7284f16d to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47679076 2024-12-10T03:34:58,618 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68035c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,620 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x37a637ac to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4cb9e50e 2024-12-10T03:34:58,626 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3eab689a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:34:58,630 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:34:58,631 DEBUG [hconnection-0x70fddd2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-12-10T03:34:58,631 DEBUG [hconnection-0x43af5278-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,631 DEBUG [hconnection-0x21ada1b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-10T03:34:58,633 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:34:58,633 DEBUG [hconnection-0x766b625e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,633 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,633 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,633 DEBUG [hconnection-0x780b56d8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,633 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,634 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35842, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,634 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:34:58,634 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35850, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,634 DEBUG [hconnection-0x7f12bf78-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:34:58,635 DEBUG [hconnection-0x6aee208e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,635 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35862, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,635 DEBUG [hconnection-0xa46ce5a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,636 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,636 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35874, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,636 DEBUG [hconnection-0x309d2101-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,637 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,638 DEBUG [hconnection-0x2fc5258b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:34:58,639 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:34:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:58,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:34:58,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:34:58,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:58,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:34:58,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:58,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:34:58,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:34:58,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801758656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801758656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801758657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801758657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801758657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d527eed9628b4dada1f84eb543930b19_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801698643/Put/seqid=0 2024-12-10T03:34:58,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742148_1324 (size=14594) 2024-12-10T03:34:58,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-10T03:34:58,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801758758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801758758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801758758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801758758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801758758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:34:58,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:58,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:58,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:58,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-10T03:34:58,940 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:34:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:58,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:58,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:58,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:58,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801758963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801758964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801758964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801758964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:58,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:58,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801758964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:59,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:34:59,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,106 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:34:59,109 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d527eed9628b4dada1f84eb543930b19_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d527eed9628b4dada1f84eb543930b19_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:34:59,110 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/9a79605016144c2ab6b11fce8cbeac7c, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:34:59,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/9a79605016144c2ab6b11fce8cbeac7c is 175, key is test_row_0/A:col10/1733801698643/Put/seqid=0 2024-12-10T03:34:59,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742149_1325 (size=39549) 2024-12-10T03:34:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-10T03:34:59,245 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:34:59,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:34:59,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801759266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801759266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801759267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801759267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801759268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,372 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T03:34:59,397 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:59,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:34:59,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
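[editor note] The entries above show two back-pressure paths converging on region 419a7ce5f33ca468fbba7b43cce2b2ec: the re-dispatched FlushRegionCallable for pid=97 keeps being rejected with "NOT flushing ... as already flushing", and client mutations are refused with RegionTooBusyException once the memstore crosses its 512.0 K blocking limit. Purely as an illustrative sketch (not part of this test, and the HBase client already performs its own internal retries for such exceptions), a caller that wanted an explicit backoff loop around a Put could look roughly like the following; the table, row, family, retry count, and sleep values are placeholder assumptions loosely mirroring the log.

    // Illustrative sketch only: back off and retry a Put when the region reports
    // RegionTooBusyException ("Over memstore limit=..." as seen in the log above).
    // All names and tunables here are placeholders, not part of the test code.
    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void putWithBackoff(Connection conn) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          long backoffMs = 100L;                      // assumed initial backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);                         // the client may still retry internally before this returns or throws
              return;
            } catch (RegionTooBusyException e) {      // region memstore over its blocking limit
              Thread.sleep(backoffMs);
              backoffMs *= 2;                         // simple exponential backoff before the next attempt
            }
          }
          throw new IOException("region still too busy after backoff retries");
        }
      }
    }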
2024-12-10T03:34:59,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,514 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/9a79605016144c2ab6b11fce8cbeac7c 2024-12-10T03:34:59,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/cde87086430a446fa2bd872c02ab15fc is 50, key is test_row_0/B:col10/1733801698643/Put/seqid=0 2024-12-10T03:34:59,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742150_1326 (size=12001) 2024-12-10T03:34:59,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:59,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
as already flushing 2024-12-10T03:34:59,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,702 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:34:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-10T03:34:59,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801759770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801759770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801759770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801759772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:34:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801759774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,854 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:34:59,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:34:59,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:34:59,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:34:59,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:34:59,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/cde87086430a446fa2bd872c02ab15fc 2024-12-10T03:34:59,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1 is 50, key is test_row_0/C:col10/1733801698643/Put/seqid=0 2024-12-10T03:34:59,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742151_1327 (size=12001) 2024-12-10T03:35:00,006 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:35:00,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:00,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
as already flushing 2024-12-10T03:35:00,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:00,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,162 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:35:00,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:00,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:00,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:00,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,315 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:35:00,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:00,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:00,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:00,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:00,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1 2024-12-10T03:35:00,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/9a79605016144c2ab6b11fce8cbeac7c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/9a79605016144c2ab6b11fce8cbeac7c 2024-12-10T03:35:00,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/9a79605016144c2ab6b11fce8cbeac7c, entries=200, sequenceid=16, filesize=38.6 K 2024-12-10T03:35:00,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/cde87086430a446fa2bd872c02ab15fc as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/cde87086430a446fa2bd872c02ab15fc 2024-12-10T03:35:00,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/cde87086430a446fa2bd872c02ab15fc, entries=150, sequenceid=16, 
filesize=11.7 K 2024-12-10T03:35:00,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1 2024-12-10T03:35:00,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T03:35:00,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1735ms, sequenceid=16, compaction requested=false 2024-12-10T03:35:00,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:00,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-10T03:35:00,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
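The cycle recorded above repeats because the remote flush procedure (pid=97) keeps being re-dispatched while the MemStoreFlusher-driven flush still holds the region's flush state: each attempt logs "NOT flushing ... as already flushing", FlushRegionCallable fails with IOException "Unable to complete flush", and the master marks the remote procedure failed and schedules another attempt until the in-progress flush finishes at 03:35:00,379. A minimal Java sketch of that fail-fast-and-retry shape follows; the Region interface and flushOrFail helper are hypothetical illustrations, not the actual HBase internals.

    import java.io.IOException;

    class FlushRetrySketch {
        // Hypothetical stand-in for the region state consulted by the callable.
        interface Region {
            boolean isFlushInProgress();
            void flush() throws IOException;
        }

        // Fail fast when a flush is already running; in the log above the master's
        // procedure dispatcher treats the IOException as a retriable failure and
        // re-dispatches the same pid a short time later.
        static void flushOrFail(Region region, String regionName) throws IOException {
            if (region.isFlushInProgress()) {
                throw new IOException("Unable to complete flush " + regionName);
            }
            region.flush();
        }
    }
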
2024-12-10T03:35:00,468 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T03:35:00,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:00,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:00,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:00,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:00,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:00,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:00,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fbcacca8eafe4c89aefa8ddcc867e281_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801698655/Put/seqid=0 2024-12-10T03:35:00,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742152_1328 (size=12154) 2024-12-10T03:35:00,570 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T03:35:00,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-10T03:35:00,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:00,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:00,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801760781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801760781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801760784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801760785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801760785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:00,879 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fbcacca8eafe4c89aefa8ddcc867e281_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fbcacca8eafe4c89aefa8ddcc867e281_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:00,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/e00673c7b87d4d6f831e9575f8016252, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:00,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/e00673c7b87d4d6f831e9575f8016252 is 175, key is test_row_0/A:col10/1733801698655/Put/seqid=0 2024-12-10T03:35:00,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742153_1329 (size=30955) 2024-12-10T03:35:00,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801760886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801760886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801760888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801760889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:00,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:00,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801760891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801761088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801761089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801761093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801761093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801761094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,284 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/e00673c7b87d4d6f831e9575f8016252 2024-12-10T03:35:01,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/ad38f1b601ac44a8b664b96db1e1573e is 50, key is test_row_0/B:col10/1733801698655/Put/seqid=0 2024-12-10T03:35:01,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742154_1330 (size=12001) 2024-12-10T03:35:01,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801761391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801761392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801761397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801761397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801761398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,710 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/ad38f1b601ac44a8b664b96db1e1573e 2024-12-10T03:35:01,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/7d334b7092eb4701a4f240b04721e3a4 is 50, key is test_row_0/C:col10/1733801698655/Put/seqid=0 2024-12-10T03:35:01,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742155_1331 (size=12001) 2024-12-10T03:35:01,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801761895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801761897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801761900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801761906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:01,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801761906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:02,119 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/7d334b7092eb4701a4f240b04721e3a4 2024-12-10T03:35:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/e00673c7b87d4d6f831e9575f8016252 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/e00673c7b87d4d6f831e9575f8016252 2024-12-10T03:35:02,127 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/e00673c7b87d4d6f831e9575f8016252, entries=150, sequenceid=40, filesize=30.2 K 2024-12-10T03:35:02,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/ad38f1b601ac44a8b664b96db1e1573e as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/ad38f1b601ac44a8b664b96db1e1573e 2024-12-10T03:35:02,132 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/ad38f1b601ac44a8b664b96db1e1573e, entries=150, sequenceid=40, filesize=11.7 K 2024-12-10T03:35:02,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/7d334b7092eb4701a4f240b04721e3a4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/7d334b7092eb4701a4f240b04721e3a4 2024-12-10T03:35:02,137 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/7d334b7092eb4701a4f240b04721e3a4, entries=150, sequenceid=40, filesize=11.7 K 2024-12-10T03:35:02,137 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1670ms, sequenceid=40, compaction requested=false 2024-12-10T03:35:02,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:02,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:02,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97 2024-12-10T03:35:02,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=97 2024-12-10T03:35:02,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-10T03:35:02,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5050 sec 2024-12-10T03:35:02,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 3.5110 sec 2024-12-10T03:35:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-10T03:35:02,739 INFO [Thread-1479 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-10T03:35:02,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:02,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-12-10T03:35:02,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T03:35:02,741 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:02,741 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:02,741 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:02,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T03:35:02,892 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:02,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-10T03:35:02,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:02,892 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:35:02,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:02,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:02,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:02,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:02,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:02,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:02,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121053d07536ba9f4c5993c2e745da8d610a_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801700783/Put/seqid=0 2024-12-10T03:35:02,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742156_1332 (size=12154) 2024-12-10T03:35:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:02,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:02,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:02,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801762946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801762945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:02,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:02,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801762946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:02,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:02,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801762954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:02,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:02,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801762954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T03:35:03,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801763055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801763055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801763055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801763060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801763060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801763259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801763260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801763260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801763263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801763263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:03,305 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121053d07536ba9f4c5993c2e745da8d610a_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121053d07536ba9f4c5993c2e745da8d610a_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:03,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/06b04e9886f947a6aa3b89104e1e8e8a, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:03,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/06b04e9886f947a6aa3b89104e1e8e8a is 175, key is test_row_0/A:col10/1733801700783/Put/seqid=0 2024-12-10T03:35:03,308 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742157_1333 (size=30955) 2024-12-10T03:35:03,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T03:35:03,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801763564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801763564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801763565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801763568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:03,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801763569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:03,709 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/06b04e9886f947a6aa3b89104e1e8e8a 2024-12-10T03:35:03,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/2fd39895f7604440a1f88c3f2aed0a28 is 50, key is test_row_0/B:col10/1733801700783/Put/seqid=0 2024-12-10T03:35:03,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742158_1334 (size=12001) 2024-12-10T03:35:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T03:35:04,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:04,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801764068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:04,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:04,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801764069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:04,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:04,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801764071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:04,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:04,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801764073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:04,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:04,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801764076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:04,146 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/2fd39895f7604440a1f88c3f2aed0a28 2024-12-10T03:35:04,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/57935f2ffecf4719bf37c0ca12b3b2f3 is 50, key is test_row_0/C:col10/1733801700783/Put/seqid=0 2024-12-10T03:35:04,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742159_1335 (size=12001) 2024-12-10T03:35:04,554 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/57935f2ffecf4719bf37c0ca12b3b2f3 2024-12-10T03:35:04,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/06b04e9886f947a6aa3b89104e1e8e8a as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/06b04e9886f947a6aa3b89104e1e8e8a 2024-12-10T03:35:04,561 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/06b04e9886f947a6aa3b89104e1e8e8a, entries=150, sequenceid=53, filesize=30.2 K 2024-12-10T03:35:04,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/2fd39895f7604440a1f88c3f2aed0a28 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/2fd39895f7604440a1f88c3f2aed0a28 2024-12-10T03:35:04,565 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/2fd39895f7604440a1f88c3f2aed0a28, entries=150, sequenceid=53, filesize=11.7 K 2024-12-10T03:35:04,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/57935f2ffecf4719bf37c0ca12b3b2f3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/57935f2ffecf4719bf37c0ca12b3b2f3 2024-12-10T03:35:04,569 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/57935f2ffecf4719bf37c0ca12b3b2f3, entries=150, sequenceid=53, filesize=11.7 K 2024-12-10T03:35:04,569 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1677ms, sequenceid=53, compaction requested=true 2024-12-10T03:35:04,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:04,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:04,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-12-10T03:35:04,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-12-10T03:35:04,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-10T03:35:04,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8290 sec 2024-12-10T03:35:04,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 1.8310 sec 2024-12-10T03:35:04,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T03:35:04,844 INFO [Thread-1479 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-10T03:35:04,845 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:04,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-12-10T03:35:04,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T03:35:04,846 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:04,847 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:04,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:04,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T03:35:04,998 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:04,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T03:35:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:04,999 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T03:35:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:05,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101e878b2320ed47068cd6352f24b303d4_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801702945/Put/seqid=0 2024-12-10T03:35:05,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742160_1336 (size=12154) 2024-12-10T03:35:05,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:05,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:05,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801765083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801765084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801765085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801765086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801765088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T03:35:05,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801765189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801765191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801765191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801765191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801765393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801765396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801765396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801765396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:05,412 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101e878b2320ed47068cd6352f24b303d4_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101e878b2320ed47068cd6352f24b303d4_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:05,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/83ec9228e58240459eafdb4b02df3ac2, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:05,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/83ec9228e58240459eafdb4b02df3ac2 is 175, key is test_row_0/A:col10/1733801702945/Put/seqid=0 2024-12-10T03:35:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742161_1337 (size=30955) 2024-12-10T03:35:05,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T03:35:05,699 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801765698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801765698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801765698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:05,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801765700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:05,816 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/83ec9228e58240459eafdb4b02df3ac2 2024-12-10T03:35:05,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/6f8d23263ff54593a3e12306082b5c7d is 50, key is test_row_0/B:col10/1733801702945/Put/seqid=0 2024-12-10T03:35:05,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742162_1338 (size=12001) 2024-12-10T03:35:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T03:35:06,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:06,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801766200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:06,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:06,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801766202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:06,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:06,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801766203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:06,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801766207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:06,226 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/6f8d23263ff54593a3e12306082b5c7d 2024-12-10T03:35:06,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/536566f1738245dfbc56711b2ae43e29 is 50, key is test_row_0/C:col10/1733801702945/Put/seqid=0 2024-12-10T03:35:06,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742163_1339 (size=12001) 2024-12-10T03:35:06,637 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/536566f1738245dfbc56711b2ae43e29 2024-12-10T03:35:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/83ec9228e58240459eafdb4b02df3ac2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/83ec9228e58240459eafdb4b02df3ac2 2024-12-10T03:35:06,643 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/83ec9228e58240459eafdb4b02df3ac2, entries=150, sequenceid=76, filesize=30.2 K 2024-12-10T03:35:06,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/6f8d23263ff54593a3e12306082b5c7d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6f8d23263ff54593a3e12306082b5c7d 2024-12-10T03:35:06,647 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6f8d23263ff54593a3e12306082b5c7d, entries=150, sequenceid=76, filesize=11.7 K 2024-12-10T03:35:06,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/536566f1738245dfbc56711b2ae43e29 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/536566f1738245dfbc56711b2ae43e29 2024-12-10T03:35:06,651 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/536566f1738245dfbc56711b2ae43e29, entries=150, sequenceid=76, filesize=11.7 K 2024-12-10T03:35:06,652 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1653ms, sequenceid=76, compaction requested=true 2024-12-10T03:35:06,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:06,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:06,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-12-10T03:35:06,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-12-10T03:35:06,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-10T03:35:06,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8060 sec 2024-12-10T03:35:06,654 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 1.8090 sec 2024-12-10T03:35:06,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T03:35:06,950 INFO [Thread-1479 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-10T03:35:06,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:06,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-10T03:35:06,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T03:35:06,952 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:06,952 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:06,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:07,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T03:35:07,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:07,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:35:07,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:07,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:07,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:07,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:07,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:07,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:07,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101438842b27a54d829f0354ae9ecc4cd2_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801705085/Put/seqid=0 2024-12-10T03:35:07,103 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:07,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:07,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:07,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742164_1340 (size=14594) 2024-12-10T03:35:07,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801767198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801767207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801767208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801767211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801767212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T03:35:07,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:07,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:07,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:07,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801767303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,407 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:07,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,505 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:07,508 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101438842b27a54d829f0354ae9ecc4cd2_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101438842b27a54d829f0354ae9ecc4cd2_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:07,508 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/7fbc2c2cd7534874a41176a7d7cb2d37, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:07,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/7fbc2c2cd7534874a41176a7d7cb2d37 is 175, key is test_row_0/A:col10/1733801705085/Put/seqid=0 2024-12-10T03:35:07,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742165_1341 (size=39549) 2024-12-10T03:35:07,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801767509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T03:35:07,560 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:07,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:07,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:07,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:07,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:07,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801767815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,864 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:07,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:07,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:07,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:07,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:07,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:07,913 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=90, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/7fbc2c2cd7534874a41176a7d7cb2d37 2024-12-10T03:35:07,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/7107648a428943fc94f8fd5ec2f95c9b is 50, key is test_row_0/B:col10/1733801705085/Put/seqid=0 2024-12-10T03:35:07,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742166_1342 (size=12001) 2024-12-10T03:35:08,016 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:08,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:08,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:08,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T03:35:08,168 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:08,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:08,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:08,320 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:08,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:08,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801768321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/7107648a428943fc94f8fd5ec2f95c9b 2024-12-10T03:35:08,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/97c869aff45d4b2e8ebb643902ebd648 is 50, key is test_row_0/C:col10/1733801705085/Put/seqid=0 2024-12-10T03:35:08,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742167_1343 (size=12001) 2024-12-10T03:35:08,473 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:08,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:08,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,625 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:08,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:08,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/97c869aff45d4b2e8ebb643902ebd648 2024-12-10T03:35:08,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/7fbc2c2cd7534874a41176a7d7cb2d37 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/7fbc2c2cd7534874a41176a7d7cb2d37 2024-12-10T03:35:08,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/7fbc2c2cd7534874a41176a7d7cb2d37, entries=200, sequenceid=90, filesize=38.6 K 2024-12-10T03:35:08,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/7107648a428943fc94f8fd5ec2f95c9b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7107648a428943fc94f8fd5ec2f95c9b 2024-12-10T03:35:08,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:08,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:08,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:08,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:08,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:08,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7107648a428943fc94f8fd5ec2f95c9b, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T03:35:08,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/97c869aff45d4b2e8ebb643902ebd648 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/97c869aff45d4b2e8ebb643902ebd648 2024-12-10T03:35:08,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/97c869aff45d4b2e8ebb643902ebd648, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T03:35:08,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1693ms, sequenceid=90, compaction requested=true 2024-12-10T03:35:08,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:08,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:08,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:08,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:08,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:08,789 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:35:08,789 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:35:08,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:08,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:08,790 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:35:08,790 DEBUG 
[RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 171963 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:35:08,790 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/B is initiating minor compaction (all files) 2024-12-10T03:35:08,790 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/A is initiating minor compaction (all files) 2024-12-10T03:35:08,790 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/A in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,790 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/B in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,790 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/cde87086430a446fa2bd872c02ab15fc, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/ad38f1b601ac44a8b664b96db1e1573e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/2fd39895f7604440a1f88c3f2aed0a28, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6f8d23263ff54593a3e12306082b5c7d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7107648a428943fc94f8fd5ec2f95c9b] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=58.6 K 2024-12-10T03:35:08,790 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/9a79605016144c2ab6b11fce8cbeac7c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/e00673c7b87d4d6f831e9575f8016252, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/06b04e9886f947a6aa3b89104e1e8e8a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/83ec9228e58240459eafdb4b02df3ac2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/7fbc2c2cd7534874a41176a7d7cb2d37] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=167.9 K 2024-12-10T03:35:08,790 
INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,790 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/9a79605016144c2ab6b11fce8cbeac7c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/e00673c7b87d4d6f831e9575f8016252, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/06b04e9886f947a6aa3b89104e1e8e8a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/83ec9228e58240459eafdb4b02df3ac2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/7fbc2c2cd7534874a41176a7d7cb2d37] 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting cde87086430a446fa2bd872c02ab15fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801698640 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a79605016144c2ab6b11fce8cbeac7c, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801698640 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ad38f1b601ac44a8b664b96db1e1573e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733801698653 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting e00673c7b87d4d6f831e9575f8016252, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733801698653 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fd39895f7604440a1f88c3f2aed0a28, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801700779 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06b04e9886f947a6aa3b89104e1e8e8a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801700779 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83ec9228e58240459eafdb4b02df3ac2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733801702945 2024-12-10T03:35:08,791 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f8d23263ff54593a3e12306082b5c7d, keycount=150, bloomtype=ROW, 
size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733801702945 2024-12-10T03:35:08,792 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fbc2c2cd7534874a41176a7d7cb2d37, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801705083 2024-12-10T03:35:08,792 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7107648a428943fc94f8fd5ec2f95c9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801705085 2024-12-10T03:35:08,799 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#B#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:08,800 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/0fbc3777ddce44afa8b4f215cb1d547c is 50, key is test_row_0/B:col10/1733801705085/Put/seqid=0 2024-12-10T03:35:08,804 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:08,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742168_1344 (size=12173) 2024-12-10T03:35:08,806 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121042e87245dc934675a7ed41d824d672d8_419a7ce5f33ca468fbba7b43cce2b2ec store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:08,809 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121042e87245dc934675a7ed41d824d672d8_419a7ce5f33ca468fbba7b43cce2b2ec, store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:08,809 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121042e87245dc934675a7ed41d824d672d8_419a7ce5f33ca468fbba7b43cce2b2ec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:08,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742169_1345 (size=4469) 2024-12-10T03:35:08,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:08,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T03:35:08,930 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:08,930 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T03:35:08,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:08,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:08,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:08,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:08,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:08,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:08,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103ab256930d004eb3b42dd421ec9d8660_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801707192/Put/seqid=0 2024-12-10T03:35:08,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742170_1346 (size=12154) 2024-12-10T03:35:08,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:08,944 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103ab256930d004eb3b42dd421ec9d8660_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103ab256930d004eb3b42dd421ec9d8660_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:08,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/0440367f0d6041479a6a33cf7f00ac1a, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:08,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/0440367f0d6041479a6a33cf7f00ac1a is 175, key is test_row_0/A:col10/1733801707192/Put/seqid=0 2024-12-10T03:35:08,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742171_1347 (size=30955) 2024-12-10T03:35:08,950 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=112, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/0440367f0d6041479a6a33cf7f00ac1a 2024-12-10T03:35:08,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/4264c4e0ae7f4258873a9ce32aee8d88 is 50, key is test_row_0/B:col10/1733801707192/Put/seqid=0 2024-12-10T03:35:08,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742172_1348 (size=12001) 2024-12-10T03:35:08,960 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/4264c4e0ae7f4258873a9ce32aee8d88 2024-12-10T03:35:08,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/748e2f0322c54bbd850ab92e3e139042 is 50, key is test_row_0/C:col10/1733801707192/Put/seqid=0 2024-12-10T03:35:08,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742173_1349 (size=12001) 2024-12-10T03:35:09,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T03:35:09,213 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/0fbc3777ddce44afa8b4f215cb1d547c as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/0fbc3777ddce44afa8b4f215cb1d547c 2024-12-10T03:35:09,215 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#A#compaction#292 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:09,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/82719cbba18b4a419eb87c72e03c1692 is 175, key is test_row_0/A:col10/1733801705085/Put/seqid=0 2024-12-10T03:35:09,218 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/B of 419a7ce5f33ca468fbba7b43cce2b2ec into 0fbc3777ddce44afa8b4f215cb1d547c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:09,218 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:09,218 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/B, priority=11, startTime=1733801708789; duration=0sec 2024-12-10T03:35:09,218 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:09,218 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:B 2024-12-10T03:35:09,218 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:35:09,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742174_1350 (size=31127) 2024-12-10T03:35:09,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:09,224 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:35:09,224 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/C is initiating minor compaction (all files) 2024-12-10T03:35:09,224 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/C in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:09,224 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/7d334b7092eb4701a4f240b04721e3a4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/57935f2ffecf4719bf37c0ca12b3b2f3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/536566f1738245dfbc56711b2ae43e29, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/97c869aff45d4b2e8ebb643902ebd648] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=58.6 K 2024-12-10T03:35:09,224 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bcd48ffe5bd4d6e9a591367f9b5bdb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801698640 2024-12-10T03:35:09,225 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d334b7092eb4701a4f240b04721e3a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733801698653 2024-12-10T03:35:09,225 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 57935f2ffecf4719bf37c0ca12b3b2f3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801700779 2024-12-10T03:35:09,225 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 536566f1738245dfbc56711b2ae43e29, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733801702945 2024-12-10T03:35:09,225 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 97c869aff45d4b2e8ebb643902ebd648, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801705085 2024-12-10T03:35:09,233 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#C#compaction#296 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:09,233 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/d7d7ed0104ad4657a8a7d1812d89da7a is 50, key is test_row_0/C:col10/1733801705085/Put/seqid=0 2024-12-10T03:35:09,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742175_1351 (size=12173) 2024-12-10T03:35:09,240 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/d7d7ed0104ad4657a8a7d1812d89da7a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d7d7ed0104ad4657a8a7d1812d89da7a 2024-12-10T03:35:09,244 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/C of 419a7ce5f33ca468fbba7b43cce2b2ec into d7d7ed0104ad4657a8a7d1812d89da7a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:09,244 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:09,244 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/C, priority=11, startTime=1733801708789; duration=0sec 2024-12-10T03:35:09,244 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:09,244 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:C 2024-12-10T03:35:09,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801769237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801769238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801769238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801769244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801769334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801769345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801769345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801769345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801769353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,371 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/748e2f0322c54bbd850ab92e3e139042 2024-12-10T03:35:09,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/0440367f0d6041479a6a33cf7f00ac1a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/0440367f0d6041479a6a33cf7f00ac1a 2024-12-10T03:35:09,377 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/0440367f0d6041479a6a33cf7f00ac1a, entries=150, sequenceid=112, filesize=30.2 K 2024-12-10T03:35:09,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/4264c4e0ae7f4258873a9ce32aee8d88 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/4264c4e0ae7f4258873a9ce32aee8d88 2024-12-10T03:35:09,381 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/4264c4e0ae7f4258873a9ce32aee8d88, entries=150, sequenceid=112, filesize=11.7 K 2024-12-10T03:35:09,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/748e2f0322c54bbd850ab92e3e139042 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/748e2f0322c54bbd850ab92e3e139042 2024-12-10T03:35:09,384 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/748e2f0322c54bbd850ab92e3e139042, entries=150, sequenceid=112, filesize=11.7 K 2024-12-10T03:35:09,385 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 419a7ce5f33ca468fbba7b43cce2b2ec in 455ms, sequenceid=112, compaction requested=false 2024-12-10T03:35:09,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:09,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:09,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-10T03:35:09,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-10T03:35:09,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-10T03:35:09,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4340 sec 2024-12-10T03:35:09,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.4360 sec 2024-12-10T03:35:09,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:09,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T03:35:09,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:09,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:09,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:09,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:09,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:09,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:09,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121071a409ee91a94d889894008e2c323987_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801709236/Put/seqid=0 2024-12-10T03:35:09,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742176_1352 (size=12254) 2024-12-10T03:35:09,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801769579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801769580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801769589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801769589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,623 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/82719cbba18b4a419eb87c72e03c1692 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82719cbba18b4a419eb87c72e03c1692 2024-12-10T03:35:09,627 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/A of 419a7ce5f33ca468fbba7b43cce2b2ec into 82719cbba18b4a419eb87c72e03c1692(size=30.4 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:09,627 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:09,627 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/A, priority=11, startTime=1733801708789; duration=0sec 2024-12-10T03:35:09,627 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:09,627 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:A 2024-12-10T03:35:09,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801769690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801769690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801769694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801769695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801769894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801769894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801769899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801769900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:09,965 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:09,968 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121071a409ee91a94d889894008e2c323987_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121071a409ee91a94d889894008e2c323987_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:09,969 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/b7d89f6c74484e8ca61137e9cc2c615c, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:09,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/b7d89f6c74484e8ca61137e9cc2c615c is 175, key is test_row_0/A:col10/1733801709236/Put/seqid=0 2024-12-10T03:35:09,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742177_1353 (size=31055) 2024-12-10T03:35:10,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801770199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801770199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801770203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801770204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,374 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/b7d89f6c74484e8ca61137e9cc2c615c 2024-12-10T03:35:10,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/7706439d09284e539520e48667274406 is 50, key is test_row_0/B:col10/1733801709236/Put/seqid=0 2024-12-10T03:35:10,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742178_1354 (size=12101) 2024-12-10T03:35:10,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801770705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801770707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801770707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801770711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:10,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/7706439d09284e539520e48667274406 2024-12-10T03:35:10,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/949ec78329444e4fb7412c8196a175e3 is 50, key is test_row_0/C:col10/1733801709236/Put/seqid=0 2024-12-10T03:35:10,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742179_1355 (size=12101) 2024-12-10T03:35:11,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T03:35:11,055 INFO [Thread-1479 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-10T03:35:11,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:11,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-10T03:35:11,057 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:11,058 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:11,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:11,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 
2024-12-10T03:35:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T03:35:11,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/949ec78329444e4fb7412c8196a175e3 2024-12-10T03:35:11,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/b7d89f6c74484e8ca61137e9cc2c615c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/b7d89f6c74484e8ca61137e9cc2c615c 2024-12-10T03:35:11,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/b7d89f6c74484e8ca61137e9cc2c615c, entries=150, sequenceid=130, filesize=30.3 K 2024-12-10T03:35:11,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/7706439d09284e539520e48667274406 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7706439d09284e539520e48667274406 2024-12-10T03:35:11,209 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:11,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:11,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7706439d09284e539520e48667274406, entries=150, sequenceid=130, filesize=11.8 K 2024-12-10T03:35:11,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/949ec78329444e4fb7412c8196a175e3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/949ec78329444e4fb7412c8196a175e3 2024-12-10T03:35:11,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/949ec78329444e4fb7412c8196a175e3, entries=150, sequenceid=130, filesize=11.8 K 2024-12-10T03:35:11,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1661ms, sequenceid=130, compaction requested=true 2024-12-10T03:35:11,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:11,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:11,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:11,215 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:11,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for 
store 419a7ce5f33ca468fbba7b43cce2b2ec:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:11,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:11,215 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:11,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:11,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93137 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36275 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/A is initiating minor compaction (all files) 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/B is initiating minor compaction (all files) 2024-12-10T03:35:11,216 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/A in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,216 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/B in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:11,216 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82719cbba18b4a419eb87c72e03c1692, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/0440367f0d6041479a6a33cf7f00ac1a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/b7d89f6c74484e8ca61137e9cc2c615c] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=91.0 K 2024-12-10T03:35:11,216 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/0fbc3777ddce44afa8b4f215cb1d547c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/4264c4e0ae7f4258873a9ce32aee8d88, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7706439d09284e539520e48667274406] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=35.4 K 2024-12-10T03:35:11,216 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82719cbba18b4a419eb87c72e03c1692, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/0440367f0d6041479a6a33cf7f00ac1a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/b7d89f6c74484e8ca61137e9cc2c615c] 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82719cbba18b4a419eb87c72e03c1692, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801705085 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fbc3777ddce44afa8b4f215cb1d547c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801705085 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0440367f0d6041479a6a33cf7f00ac1a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1733801707187 2024-12-10T03:35:11,216 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4264c4e0ae7f4258873a9ce32aee8d88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1733801707187 2024-12-10T03:35:11,217 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7d89f6c74484e8ca61137e9cc2c615c, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801709236 2024-12-10T03:35:11,217 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7706439d09284e539520e48667274406, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801709236 2024-12-10T03:35:11,222 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:11,223 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#B#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:11,224 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/3aeb6139135a47d8889eda2b0fa6ffbf is 50, key is test_row_0/B:col10/1733801709236/Put/seqid=0 2024-12-10T03:35:11,225 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412103f425caa36714f4192bfbce4caaf84ef_419a7ce5f33ca468fbba7b43cce2b2ec store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:11,227 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412103f425caa36714f4192bfbce4caaf84ef_419a7ce5f33ca468fbba7b43cce2b2ec, store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:11,227 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103f425caa36714f4192bfbce4caaf84ef_419a7ce5f33ca468fbba7b43cce2b2ec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:11,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742180_1356 (size=12375) 2024-12-10T03:35:11,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742181_1357 (size=4469) 2024-12-10T03:35:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:11,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T03:35:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:11,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068f1428cc34f4f3ba8bc41112cbc0e89_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801711352/Put/seqid=0 2024-12-10T03:35:11,361 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742182_1358 (size=14794) 2024-12-10T03:35:11,361 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:11,361 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:11,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:11,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
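[Editorial note, not part of the captured log] The repeated pid=105 failures above show the master re-dispatching the FlushRegionCallable for region 419a7ce5f33ca468fbba7b43cce2b2ec while MemStoreFlusher.0 already has a flush in flight, so each attempt is rejected with "Unable to complete flush ... as already flushing" until the in-progress flush finishes. For illustration only, a minimal sketch of how such a table flush procedure is commonly requested from a client through the public Admin API; the configuration and class name below are assumptions and are not taken from the test harness.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Hypothetical example class, not from the test code.
    public class FlushRequestExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to run a flush procedure for the table's regions.
          // Judging from the log above, if a region is already flushing the
          // region-side callable fails and the procedure is dispatched again
          // until the ongoing flush completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }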
2024-12-10T03:35:11,364 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068f1428cc34f4f3ba8bc41112cbc0e89_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068f1428cc34f4f3ba8bc41112cbc0e89_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T03:35:11,365 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/82c76864cf604a0ba0bd25aa98684271, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:11,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/82c76864cf604a0ba0bd25aa98684271 is 175, key is test_row_0/A:col10/1733801711352/Put/seqid=0 2024-12-10T03:35:11,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742183_1359 (size=39749) 2024-12-10T03:35:11,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801771412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,513 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
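[Editorial note, not part of the captured log] The RegionTooBusyException entries above mean client puts are being rejected because the region's memstore is over its blocking limit (512.0 K here); that limit is normally the configured flush size (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier, and this test appears to run with a very small flush size. As an illustrative sketch only, the Java below shows a writer that backs off and retries when the region reports it is too busy. The class name, row, and value are assumptions, and depending on client retry settings the exception may instead be retried inside the HBase client or surface wrapped in a retries-exhausted exception rather than directly as shown.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical example class, not from the test code.
    public class BusyRegionPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);   // may be rejected while the memstore is over its blocking limit
              break;            // write accepted
            } catch (RegionTooBusyException e) {
              // The region server is flushing and blocking new updates;
              // back off before retrying instead of adding more load.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }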
2024-12-10T03:35:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:11,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801771518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,636 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/3aeb6139135a47d8889eda2b0fa6ffbf as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/3aeb6139135a47d8889eda2b0fa6ffbf 2024-12-10T03:35:11,640 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/B of 419a7ce5f33ca468fbba7b43cce2b2ec into 3aeb6139135a47d8889eda2b0fa6ffbf(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:11,640 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:11,640 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/B, priority=13, startTime=1733801711215; duration=0sec 2024-12-10T03:35:11,640 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:11,640 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:B 2024-12-10T03:35:11,640 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:11,641 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36275 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:11,641 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/C is initiating minor compaction (all files) 2024-12-10T03:35:11,641 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/C in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,641 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d7d7ed0104ad4657a8a7d1812d89da7a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/748e2f0322c54bbd850ab92e3e139042, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/949ec78329444e4fb7412c8196a175e3] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=35.4 K 2024-12-10T03:35:11,641 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d7d7ed0104ad4657a8a7d1812d89da7a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733801705085 2024-12-10T03:35:11,641 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 748e2f0322c54bbd850ab92e3e139042, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1733801707187 2024-12-10T03:35:11,642 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 949ec78329444e4fb7412c8196a175e3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801709236 2024-12-10T03:35:11,647 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
419a7ce5f33ca468fbba7b43cce2b2ec#C#compaction#303 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:11,647 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2c2081d0139042f98d2c799c76f342e6 is 50, key is test_row_0/C:col10/1733801709236/Put/seqid=0 2024-12-10T03:35:11,650 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#A#compaction#300 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:11,650 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/72ba69291d3747e19e386ee56eb001c7 is 175, key is test_row_0/A:col10/1733801709236/Put/seqid=0 2024-12-10T03:35:11,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742184_1360 (size=12375) 2024-12-10T03:35:11,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742185_1361 (size=31329) 2024-12-10T03:35:11,655 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2c2081d0139042f98d2c799c76f342e6 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c2081d0139042f98d2c799c76f342e6 2024-12-10T03:35:11,658 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/72ba69291d3747e19e386ee56eb001c7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/72ba69291d3747e19e386ee56eb001c7 2024-12-10T03:35:11,660 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/C of 419a7ce5f33ca468fbba7b43cce2b2ec into 2c2081d0139042f98d2c799c76f342e6(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
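[Editorial note, not part of the captured log] At this point the C-family rewrite has been committed from the .tmp directory into 2c2081d0139042f98d2c799c76f342e6, and the A-family result is committed just below, after the ExploringCompactionPolicy selected the three eligible files per store. For illustration only, a minimal sketch of how a compaction of the same family could be requested and observed through the public Admin API; the class name and polling interval are assumptions, and the test itself relies on the region server's own compaction threads rather than this client call.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical example class, not from the test code.
    public class CompactionRequestExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a major compaction of the C family, comparable to the
          // rewrites the long/shortCompactions threads perform in the log.
          admin.majorCompact(table, Bytes.toBytes("C"));

          // Poll until the region server reports no compaction is running.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);
          }
        }
      }
    }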
2024-12-10T03:35:11,660 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:11,660 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/C, priority=13, startTime=1733801711215; duration=0sec 2024-12-10T03:35:11,660 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:11,660 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:C 2024-12-10T03:35:11,662 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/A of 419a7ce5f33ca468fbba7b43cce2b2ec into 72ba69291d3747e19e386ee56eb001c7(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:11,662 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:11,662 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/A, priority=13, startTime=1733801711215; duration=0sec 2024-12-10T03:35:11,662 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:11,662 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:A 2024-12-10T03:35:11,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T03:35:11,666 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:11,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:11,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:11,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801771715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:11,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801771715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:11,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801771717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:11,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801771721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:11,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801771721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,769 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=153, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/82c76864cf604a0ba0bd25aa98684271 2024-12-10T03:35:11,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/6e4c5877b4bb4da4b532c74cfdd0e488 is 50, key is test_row_0/B:col10/1733801711352/Put/seqid=0 2024-12-10T03:35:11,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742186_1362 (size=12151) 2024-12-10T03:35:11,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/6e4c5877b4bb4da4b532c74cfdd0e488 2024-12-10T03:35:11,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/0aba99f8b1714834ae7184b0f6a04f15 is 50, key is test_row_0/C:col10/1733801711352/Put/seqid=0 2024-12-10T03:35:11,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742187_1363 (size=12151) 2024-12-10T03:35:11,818 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:11,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:11,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,970 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:11,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:11,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
as already flushing 2024-12-10T03:35:11,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:11,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:11,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:12,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:12,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801772026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:12,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:12,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:12,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:12,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:12,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:12,123 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:12,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:12,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:12,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T03:35:12,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/0aba99f8b1714834ae7184b0f6a04f15 2024-12-10T03:35:12,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/82c76864cf604a0ba0bd25aa98684271 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82c76864cf604a0ba0bd25aa98684271 2024-12-10T03:35:12,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82c76864cf604a0ba0bd25aa98684271, entries=200, sequenceid=153, filesize=38.8 K 2024-12-10T03:35:12,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/6e4c5877b4bb4da4b532c74cfdd0e488 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6e4c5877b4bb4da4b532c74cfdd0e488 2024-12-10T03:35:12,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6e4c5877b4bb4da4b532c74cfdd0e488, entries=150, sequenceid=153, filesize=11.9 K 2024-12-10T03:35:12,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/0aba99f8b1714834ae7184b0f6a04f15 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0aba99f8b1714834ae7184b0f6a04f15 2024-12-10T03:35:12,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0aba99f8b1714834ae7184b0f6a04f15, entries=150, sequenceid=153, filesize=11.9 K 2024-12-10T03:35:12,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 419a7ce5f33ca468fbba7b43cce2b2ec in 851ms, sequenceid=153, compaction requested=false 2024-12-10T03:35:12,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:12,275 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:12,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T03:35:12,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:12,275 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:35:12,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:12,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:12,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:12,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:12,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:12,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:12,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108dc68046d16644509feee2b869199cab_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801711399/Put/seqid=0 2024-12-10T03:35:12,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742188_1364 (size=12304) 2024-12-10T03:35:12,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:12,290 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108dc68046d16644509feee2b869199cab_419a7ce5f33ca468fbba7b43cce2b2ec to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108dc68046d16644509feee2b869199cab_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:12,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/1dbad0b74797472fae11d05bd83bb580, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:12,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/1dbad0b74797472fae11d05bd83bb580 is 175, key is test_row_0/A:col10/1733801711399/Put/seqid=0 2024-12-10T03:35:12,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742189_1365 (size=31105) 2024-12-10T03:35:12,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:12,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:12,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:12,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801772606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:12,700 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/1dbad0b74797472fae11d05bd83bb580 2024-12-10T03:35:12,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/704e5b97a2a74730a41467bc35828750 is 50, key is test_row_0/B:col10/1733801711399/Put/seqid=0 2024-12-10T03:35:12,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742190_1366 (size=12151) 2024-12-10T03:35:12,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:12,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801772710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:12,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801772912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,109 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/704e5b97a2a74730a41467bc35828750 2024-12-10T03:35:13,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/d6d7e91767664887b678ec29eebae0fd is 50, key is test_row_0/C:col10/1733801711399/Put/seqid=0 2024-12-10T03:35:13,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742191_1367 (size=12151) 2024-12-10T03:35:13,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T03:35:13,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801773215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,517 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/d6d7e91767664887b678ec29eebae0fd 2024-12-10T03:35:13,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/1dbad0b74797472fae11d05bd83bb580 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/1dbad0b74797472fae11d05bd83bb580 2024-12-10T03:35:13,524 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/1dbad0b74797472fae11d05bd83bb580, entries=150, sequenceid=170, filesize=30.4 K 2024-12-10T03:35:13,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/704e5b97a2a74730a41467bc35828750 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/704e5b97a2a74730a41467bc35828750 2024-12-10T03:35:13,528 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/704e5b97a2a74730a41467bc35828750, entries=150, sequenceid=170, filesize=11.9 K 2024-12-10T03:35:13,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/d6d7e91767664887b678ec29eebae0fd as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d6d7e91767664887b678ec29eebae0fd 2024-12-10T03:35:13,532 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d6d7e91767664887b678ec29eebae0fd, entries=150, sequenceid=170, filesize=11.9 K 2024-12-10T03:35:13,532 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1257ms, sequenceid=170, compaction requested=true 2024-12-10T03:35:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-10T03:35:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-10T03:35:13,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-10T03:35:13,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4750 sec 2024-12-10T03:35:13,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.4780 sec 2024-12-10T03:35:13,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:13,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T03:35:13,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:13,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:13,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:13,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:13,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:13,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:13,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121091e13c63c3f54c74851bd732bca970d6_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801713722/Put/seqid=0 2024-12-10T03:35:13,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742192_1368 (size=14794) 2024-12-10T03:35:13,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801773738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,743 DEBUG [Thread-1477 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:35:13,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801773739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801773739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801773740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801773741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801773846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801773846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801773846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:13,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:13,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801773848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801774050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801774050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801774050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801774050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,133 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:14,136 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121091e13c63c3f54c74851bd732bca970d6_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121091e13c63c3f54c74851bd732bca970d6_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:14,137 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/cee9eccb012942a8b7c6c09e3639de26, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:14,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/cee9eccb012942a8b7c6c09e3639de26 is 175, key is test_row_0/A:col10/1733801713722/Put/seqid=0 2024-12-10T03:35:14,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742193_1369 (size=39749) 2024-12-10T03:35:14,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801774352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801774354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801774354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801774356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,541 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=193, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/cee9eccb012942a8b7c6c09e3639de26 2024-12-10T03:35:14,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5f3febfb183b49d6ac8ca0b2b266d023 is 50, key is test_row_0/B:col10/1733801713722/Put/seqid=0 2024-12-10T03:35:14,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742194_1370 (size=12151) 2024-12-10T03:35:14,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801774857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801774859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801774861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:14,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801774862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:14,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5f3febfb183b49d6ac8ca0b2b266d023 2024-12-10T03:35:14,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2236bccd1e074bed98f8b88d3d8dd321 is 50, key is test_row_0/C:col10/1733801713722/Put/seqid=0 2024-12-10T03:35:14,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742195_1371 (size=12151) 2024-12-10T03:35:15,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T03:35:15,167 INFO [Thread-1479 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-10T03:35:15,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:15,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-10T03:35:15,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T03:35:15,170 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:15,170 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:15,170 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-12-10T03:35:15,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T03:35:15,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T03:35:15,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:15,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:15,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:15,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:15,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:15,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:15,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2236bccd1e074bed98f8b88d3d8dd321 2024-12-10T03:35:15,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/cee9eccb012942a8b7c6c09e3639de26 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/cee9eccb012942a8b7c6c09e3639de26 2024-12-10T03:35:15,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/cee9eccb012942a8b7c6c09e3639de26, entries=200, sequenceid=193, filesize=38.8 K 2024-12-10T03:35:15,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5f3febfb183b49d6ac8ca0b2b266d023 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5f3febfb183b49d6ac8ca0b2b266d023 2024-12-10T03:35:15,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5f3febfb183b49d6ac8ca0b2b266d023, entries=150, sequenceid=193, filesize=11.9 K 2024-12-10T03:35:15,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2236bccd1e074bed98f8b88d3d8dd321 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2236bccd1e074bed98f8b88d3d8dd321 2024-12-10T03:35:15,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2236bccd1e074bed98f8b88d3d8dd321, entries=150, sequenceid=193, filesize=11.9 K 2024-12-10T03:35:15,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1649ms, sequenceid=193, compaction requested=true 2024-12-10T03:35:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:15,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:15,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:15,373 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:35:15,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:15,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:15,373 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:35:15,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:15,373 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:15,374 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:35:15,374 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141932 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:35:15,374 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/A is initiating minor compaction (all files) 2024-12-10T03:35:15,374 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/B is initiating minor compaction (all files) 2024-12-10T03:35:15,374 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/A in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:15,374 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/B in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:15,374 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/3aeb6139135a47d8889eda2b0fa6ffbf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6e4c5877b4bb4da4b532c74cfdd0e488, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/704e5b97a2a74730a41467bc35828750, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5f3febfb183b49d6ac8ca0b2b266d023] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=47.7 K 2024-12-10T03:35:15,374 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/72ba69291d3747e19e386ee56eb001c7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82c76864cf604a0ba0bd25aa98684271, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/1dbad0b74797472fae11d05bd83bb580, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/cee9eccb012942a8b7c6c09e3639de26] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=138.6 K 2024-12-10T03:35:15,374 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:15,374 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/72ba69291d3747e19e386ee56eb001c7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82c76864cf604a0ba0bd25aa98684271, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/1dbad0b74797472fae11d05bd83bb580, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/cee9eccb012942a8b7c6c09e3639de26] 2024-12-10T03:35:15,374 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 3aeb6139135a47d8889eda2b0fa6ffbf, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801709236 2024-12-10T03:35:15,375 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72ba69291d3747e19e386ee56eb001c7, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801709236 2024-12-10T03:35:15,375 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e4c5877b4bb4da4b532c74cfdd0e488, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733801709579 2024-12-10T03:35:15,375 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82c76864cf604a0ba0bd25aa98684271, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733801709579 2024-12-10T03:35:15,375 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 704e5b97a2a74730a41467bc35828750, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733801711399 2024-12-10T03:35:15,375 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dbad0b74797472fae11d05bd83bb580, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733801711399 2024-12-10T03:35:15,375 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f3febfb183b49d6ac8ca0b2b266d023, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733801712593 2024-12-10T03:35:15,375 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting cee9eccb012942a8b7c6c09e3639de26, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733801712593 2024-12-10T03:35:15,387 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#B#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:15,387 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/36f47637ec2f4cde89c4a27ca07ae6f6 is 50, key is test_row_0/B:col10/1733801713722/Put/seqid=0 2024-12-10T03:35:15,390 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:15,392 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210f164a6f51dd44ebeb7bf577717dc614e_419a7ce5f33ca468fbba7b43cce2b2ec store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:15,394 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210f164a6f51dd44ebeb7bf577717dc614e_419a7ce5f33ca468fbba7b43cce2b2ec, store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:15,394 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f164a6f51dd44ebeb7bf577717dc614e_419a7ce5f33ca468fbba7b43cce2b2ec because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:15,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742196_1372 (size=12561) 2024-12-10T03:35:15,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742197_1373 (size=4469) 2024-12-10T03:35:15,404 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/36f47637ec2f4cde89c4a27ca07ae6f6 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/36f47637ec2f4cde89c4a27ca07ae6f6 2024-12-10T03:35:15,404 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#A#compaction#313 average throughput is 1.75 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:15,405 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/d63f2aefd69647b7a2141c508e2d63c4 is 175, key is test_row_0/A:col10/1733801713722/Put/seqid=0 2024-12-10T03:35:15,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742198_1374 (size=31515) 2024-12-10T03:35:15,413 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/B of 419a7ce5f33ca468fbba7b43cce2b2ec into 36f47637ec2f4cde89c4a27ca07ae6f6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:15,413 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:15,413 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/B, priority=12, startTime=1733801715373; duration=0sec 2024-12-10T03:35:15,413 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:15,413 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:B 2024-12-10T03:35:15,413 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:35:15,414 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:35:15,414 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 419a7ce5f33ca468fbba7b43cce2b2ec/C is initiating minor compaction (all files) 2024-12-10T03:35:15,414 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 419a7ce5f33ca468fbba7b43cce2b2ec/C in TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:15,414 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c2081d0139042f98d2c799c76f342e6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0aba99f8b1714834ae7184b0f6a04f15, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d6d7e91767664887b678ec29eebae0fd, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2236bccd1e074bed98f8b88d3d8dd321] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp, totalSize=47.7 K 2024-12-10T03:35:15,415 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c2081d0139042f98d2c799c76f342e6, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801709236 2024-12-10T03:35:15,415 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 0aba99f8b1714834ae7184b0f6a04f15, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733801709579 2024-12-10T03:35:15,415 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d6d7e91767664887b678ec29eebae0fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733801711399 2024-12-10T03:35:15,416 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2236bccd1e074bed98f8b88d3d8dd321, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1733801712593 2024-12-10T03:35:15,424 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 419a7ce5f33ca468fbba7b43cce2b2ec#C#compaction#314 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:15,425 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/04244fe89c1643dcb11a441f71387e34 is 50, key is test_row_0/C:col10/1733801713722/Put/seqid=0 2024-12-10T03:35:15,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742199_1375 (size=12561) 2024-12-10T03:35:15,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T03:35:15,473 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T03:35:15,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:15,474 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T03:35:15,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:15,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:15,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:15,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:15,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:15,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:15,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210cecce4860aac441e82570b4700aa57e2_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801713741/Put/seqid=0 2024-12-10T03:35:15,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to 
blk_1073742200_1376 (size=12304) 2024-12-10T03:35:15,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T03:35:15,813 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/d63f2aefd69647b7a2141c508e2d63c4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/d63f2aefd69647b7a2141c508e2d63c4 2024-12-10T03:35:15,817 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/A of 419a7ce5f33ca468fbba7b43cce2b2ec into d63f2aefd69647b7a2141c508e2d63c4(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:15,817 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:15,817 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/A, priority=12, startTime=1733801715373; duration=0sec 2024-12-10T03:35:15,817 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:15,817 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:A 2024-12-10T03:35:15,836 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/04244fe89c1643dcb11a441f71387e34 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/04244fe89c1643dcb11a441f71387e34 2024-12-10T03:35:15,840 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 419a7ce5f33ca468fbba7b43cce2b2ec/C of 419a7ce5f33ca468fbba7b43cce2b2ec into 04244fe89c1643dcb11a441f71387e34(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
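[Aside, not part of the captured log: the flush of TestAcidGuarantees recorded around this point (FlushTableProcedure pid=106, FlushRegionProcedure pid=107) is the kind of operation the test client requests through the standard HBase Admin API. A minimal sketch of such a request follows, assuming a reachable cluster whose hbase-site.xml is on the classpath; only the table name is taken from the log, everything else is illustrative.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Standard client configuration; the ZooKeeper quorum comes from hbase-site.xml.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask the master to flush all regions of the table, analogous to the
            // FLUSH operation on default:TestAcidGuarantees seen in the surrounding log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}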
2024-12-10T03:35:15,840 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:15,840 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec., storeName=419a7ce5f33ca468fbba7b43cce2b2ec/C, priority=12, startTime=1733801715373; duration=0sec 2024-12-10T03:35:15,840 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:15,840 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:C 2024-12-10T03:35:15,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:15,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:15,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:15,887 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210cecce4860aac441e82570b4700aa57e2_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210cecce4860aac441e82570b4700aa57e2_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:15,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/013364ce4870476e895f3994940af737, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:15,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/013364ce4870476e895f3994940af737 is 175, key is test_row_0/A:col10/1733801713741/Put/seqid=0 2024-12-10T03:35:15,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801775885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801775886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742201_1377 (size=31105) 2024-12-10T03:35:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801775888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801775889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801775990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801775991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801775993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:15,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:15,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801775993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801776195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801776195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801776195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801776198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T03:35:16,292 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/013364ce4870476e895f3994940af737 2024-12-10T03:35:16,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5e995798d51949b4813d0c9020873a42 is 50, key is test_row_0/B:col10/1733801713741/Put/seqid=0 2024-12-10T03:35:16,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742202_1378 (size=12151) 2024-12-10T03:35:16,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801776499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801776500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,502 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801776500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:16,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801776501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:16,704 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5e995798d51949b4813d0c9020873a42 2024-12-10T03:35:16,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/f10062400b8a495197a086ee64fe2d06 is 50, key is test_row_0/C:col10/1733801713741/Put/seqid=0 2024-12-10T03:35:16,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742203_1379 (size=12151) 2024-12-10T03:35:17,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:17,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801777003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:17,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801777003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:17,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801777004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:17,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:17,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801777004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:17,113 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/f10062400b8a495197a086ee64fe2d06 2024-12-10T03:35:17,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/013364ce4870476e895f3994940af737 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/013364ce4870476e895f3994940af737 2024-12-10T03:35:17,119 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/013364ce4870476e895f3994940af737, entries=150, sequenceid=207, filesize=30.4 K 2024-12-10T03:35:17,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5e995798d51949b4813d0c9020873a42 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e995798d51949b4813d0c9020873a42 2024-12-10T03:35:17,123 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e995798d51949b4813d0c9020873a42, entries=150, sequenceid=207, filesize=11.9 K 2024-12-10T03:35:17,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/f10062400b8a495197a086ee64fe2d06 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/f10062400b8a495197a086ee64fe2d06 2024-12-10T03:35:17,127 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/f10062400b8a495197a086ee64fe2d06, entries=150, sequenceid=207, filesize=11.9 K 2024-12-10T03:35:17,128 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1654ms, sequenceid=207, compaction requested=false 2024-12-10T03:35:17,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:17,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:17,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-10T03:35:17,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-10T03:35:17,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-10T03:35:17,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9590 sec 2024-12-10T03:35:17,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.9620 sec 2024-12-10T03:35:17,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T03:35:17,273 INFO [Thread-1479 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-10T03:35:17,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:17,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-10T03:35:17,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T03:35:17,275 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:17,275 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:17,275 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:17,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T03:35:17,426 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:17,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-10T03:35:17,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:17,427 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T03:35:17,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:17,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:17,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:17,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:17,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:17,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:17,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210231361c777de4f059abaeb86a6998d9c_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801715880/Put/seqid=0 2024-12-10T03:35:17,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36539 is added to blk_1073742204_1380 (size=12304) 2024-12-10T03:35:17,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T03:35:17,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:17,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. as already flushing 2024-12-10T03:35:17,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:17,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801777792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:17,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:17,839 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210231361c777de4f059abaeb86a6998d9c_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210231361c777de4f059abaeb86a6998d9c_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:17,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/07968a7c599e4a16918b22d1cf7de6ea, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:17,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/07968a7c599e4a16918b22d1cf7de6ea is 175, key is test_row_0/A:col10/1733801715880/Put/seqid=0 2024-12-10T03:35:17,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742205_1381 (size=31105) 2024-12-10T03:35:17,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T03:35:17,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:17,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801777897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:18,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:18,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35874 deadline: 1733801778007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:18,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:18,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35806 deadline: 1733801778011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:18,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:18,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35826 deadline: 1733801778014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:18,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:18,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35900 deadline: 1733801778015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:18,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:18,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801778101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:18,244 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=232, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/07968a7c599e4a16918b22d1cf7de6ea 2024-12-10T03:35:18,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/c312f33de954445b891e925aa33c22f8 is 50, key is test_row_0/B:col10/1733801715880/Put/seqid=0 2024-12-10T03:35:18,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742206_1382 (size=12151) 2024-12-10T03:35:18,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T03:35:18,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:18,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801778405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:18,632 DEBUG [Thread-1480 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27539bdc to 127.0.0.1:51621 2024-12-10T03:35:18,632 DEBUG [Thread-1480 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:18,633 DEBUG [Thread-1484 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x798e7fd4 to 127.0.0.1:51621 2024-12-10T03:35:18,633 DEBUG [Thread-1484 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:18,633 DEBUG [Thread-1486 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7284f16d to 127.0.0.1:51621 2024-12-10T03:35:18,633 DEBUG [Thread-1486 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:18,636 DEBUG [Thread-1482 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e3203d9 to 127.0.0.1:51621 2024-12-10T03:35:18,636 DEBUG [Thread-1482 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:18,636 DEBUG [Thread-1488 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37a637ac to 127.0.0.1:51621 2024-12-10T03:35:18,636 DEBUG [Thread-1488 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:18,652 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/c312f33de954445b891e925aa33c22f8 2024-12-10T03:35:18,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2c724ba1c0f04930bd76036d0e4e1cfd is 50, key is test_row_0/C:col10/1733801715880/Put/seqid=0 2024-12-10T03:35:18,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742207_1383 (size=12151) 2024-12-10T03:35:18,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35808 deadline: 1733801778914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:19,063 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2c724ba1c0f04930bd76036d0e4e1cfd 2024-12-10T03:35:19,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/07968a7c599e4a16918b22d1cf7de6ea as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/07968a7c599e4a16918b22d1cf7de6ea 2024-12-10T03:35:19,092 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/07968a7c599e4a16918b22d1cf7de6ea, entries=150, sequenceid=232, filesize=30.4 K 2024-12-10T03:35:19,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/c312f33de954445b891e925aa33c22f8 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/c312f33de954445b891e925aa33c22f8 2024-12-10T03:35:19,095 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/c312f33de954445b891e925aa33c22f8, entries=150, sequenceid=232, filesize=11.9 K 2024-12-10T03:35:19,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/2c724ba1c0f04930bd76036d0e4e1cfd as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c724ba1c0f04930bd76036d0e4e1cfd 2024-12-10T03:35:19,097 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c724ba1c0f04930bd76036d0e4e1cfd, entries=150, sequenceid=232, filesize=11.9 K 2024-12-10T03:35:19,098 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1671ms, sequenceid=232, compaction requested=true 2024-12-10T03:35:19,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:19,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:19,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-10T03:35:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-10T03:35:19,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-10T03:35:19,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8240 sec 2024-12-10T03:35:19,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.8260 sec 2024-12-10T03:35:19,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T03:35:19,380 INFO [Thread-1479 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-10T03:35:19,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:19,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:35:19,923 DEBUG [Thread-1477 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:51621 2024-12-10T03:35:19,923 DEBUG [Thread-1477 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:19,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:19,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:19,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:19,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:19,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:19,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:19,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ba174f4a70664c76986cd2aa1c99ccba_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_0/A:col10/1733801719920/Put/seqid=0 2024-12-10T03:35:19,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742208_1384 (size=12304) 2024-12-10T03:35:20,019 DEBUG [Thread-1473 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cd96549 to 127.0.0.1:51621 2024-12-10T03:35:20,019 DEBUG [Thread-1473 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:20,027 DEBUG [Thread-1469 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc42ea6 to 127.0.0.1:51621 2024-12-10T03:35:20,027 DEBUG [Thread-1469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:20,028 DEBUG [Thread-1475 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31aea41b to 127.0.0.1:51621 2024-12-10T03:35:20,028 DEBUG [Thread-1475 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:20,033 DEBUG [Thread-1471 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x117e86d9 to 127.0.0.1:51621 2024-12-10T03:35:20,033 DEBUG [Thread-1471 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:20,033 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-10T03:35:20,033 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3348 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10044 rows 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3339 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10016 rows 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3345 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10031 rows 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3336 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10006 rows 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3354 2024-12-10T03:35:20,034 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10060 rows 2024-12-10T03:35:20,034 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T03:35:20,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51f7d511 to 127.0.0.1:51621 2024-12-10T03:35:20,034 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:20,037 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T03:35:20,038 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T03:35:20,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:20,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T03:35:20,041 
DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801720041"}]},"ts":"1733801720041"} 2024-12-10T03:35:20,042 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T03:35:20,058 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T03:35:20,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:35:20,060 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, UNASSIGN}] 2024-12-10T03:35:20,061 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, UNASSIGN 2024-12-10T03:35:20,061 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:20,062 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:35:20,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; CloseRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:35:20,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T03:35:20,214 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:20,214 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(124): Close 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:20,214 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:35:20,214 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1681): Closing 419a7ce5f33ca468fbba7b43cce2b2ec, disabling compactions & flushes 2024-12-10T03:35:20,214 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:20,336 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:20,341 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ba174f4a70664c76986cd2aa1c99ccba_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ba174f4a70664c76986cd2aa1c99ccba_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:20,342 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/402bdbc22e4f4dc5a31307fcf16c965d, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:20,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/402bdbc22e4f4dc5a31307fcf16c965d is 175, key is test_row_0/A:col10/1733801719920/Put/seqid=0 2024-12-10T03:35:20,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T03:35:20,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742209_1385 (size=31105) 2024-12-10T03:35:20,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T03:35:20,750 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=246, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/402bdbc22e4f4dc5a31307fcf16c965d 2024-12-10T03:35:20,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/dc12535d1b894ae5a6cefa0786f40829 is 50, key is test_row_0/B:col10/1733801719920/Put/seqid=0 2024-12-10T03:35:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742210_1386 (size=12151) 2024-12-10T03:35:21,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T03:35:21,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/dc12535d1b894ae5a6cefa0786f40829 2024-12-10T03:35:21,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/984261dab72f4119be74bbead5b05fef is 50, key is test_row_0/C:col10/1733801719920/Put/seqid=0 2024-12-10T03:35:21,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742211_1387 (size=12151) 2024-12-10T03:35:21,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/984261dab72f4119be74bbead5b05fef 2024-12-10T03:35:21,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/402bdbc22e4f4dc5a31307fcf16c965d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/402bdbc22e4f4dc5a31307fcf16c965d 2024-12-10T03:35:21,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/402bdbc22e4f4dc5a31307fcf16c965d, entries=150, sequenceid=246, filesize=30.4 K 2024-12-10T03:35:21,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/dc12535d1b894ae5a6cefa0786f40829 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/dc12535d1b894ae5a6cefa0786f40829 2024-12-10T03:35:21,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/dc12535d1b894ae5a6cefa0786f40829, entries=150, sequenceid=246, filesize=11.9 K 2024-12-10T03:35:21,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/984261dab72f4119be74bbead5b05fef as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/984261dab72f4119be74bbead5b05fef 2024-12-10T03:35:21,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/984261dab72f4119be74bbead5b05fef, entries=150, sequenceid=246, filesize=11.9 K 2024-12-10T03:35:21,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=26.84 KB/27480 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1691ms, sequenceid=246, compaction requested=true 2024-12-10T03:35:21,614 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2538): Flush status journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:21,614 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:21,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. after waiting 0 ms 2024-12-10T03:35:21,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:21,614 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. because compaction request was cancelled 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 2024-12-10T03:35:21,614 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:A 2024-12-10T03:35:21,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:21,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:21,614 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. because compaction request was cancelled 2024-12-10T03:35:21,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 419a7ce5f33ca468fbba7b43cce2b2ec:C, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:21,614 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2837): Flushing 419a7ce5f33ca468fbba7b43cce2b2ec 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T03:35:21,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:21,614 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
because compaction request was cancelled 2024-12-10T03:35:21,614 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:B 2024-12-10T03:35:21,614 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 419a7ce5f33ca468fbba7b43cce2b2ec:C 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=A 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=B 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 419a7ce5f33ca468fbba7b43cce2b2ec, store=C 2024-12-10T03:35:21,614 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:21,619 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210facf3ce802d44641a3dd2e1076767975_419a7ce5f33ca468fbba7b43cce2b2ec is 50, key is test_row_1/A:col10/1733801720031/Put/seqid=0 2024-12-10T03:35:21,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742212_1388 (size=7324) 2024-12-10T03:35:22,024 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:22,030 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210facf3ce802d44641a3dd2e1076767975_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210facf3ce802d44641a3dd2e1076767975_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:22,031 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/343dbe7b1d244ceb9b22578fbeb1f156, store: [table=TestAcidGuarantees family=A region=419a7ce5f33ca468fbba7b43cce2b2ec] 2024-12-10T03:35:22,032 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/343dbe7b1d244ceb9b22578fbeb1f156 is 175, key is test_row_1/A:col10/1733801720031/Put/seqid=0 2024-12-10T03:35:22,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742213_1389 (size=13815) 2024-12-10T03:35:22,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T03:35:22,438 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/343dbe7b1d244ceb9b22578fbeb1f156 2024-12-10T03:35:22,453 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5e7920d340894ae086eed46220b72ba5 is 50, key is test_row_1/B:col10/1733801720031/Put/seqid=0 2024-12-10T03:35:22,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742214_1390 (size=7365) 2024-12-10T03:35:22,861 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5e7920d340894ae086eed46220b72ba5 2024-12-10T03:35:22,875 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/6e9203ec36f34a99adddb6d69de99146 is 50, key is test_row_1/C:col10/1733801720031/Put/seqid=0 2024-12-10T03:35:22,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742215_1391 (size=7365) 2024-12-10T03:35:23,280 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/6e9203ec36f34a99adddb6d69de99146 2024-12-10T03:35:23,286 DEBUG 
[RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/A/343dbe7b1d244ceb9b22578fbeb1f156 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/343dbe7b1d244ceb9b22578fbeb1f156 2024-12-10T03:35:23,290 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/343dbe7b1d244ceb9b22578fbeb1f156, entries=50, sequenceid=253, filesize=13.5 K 2024-12-10T03:35:23,291 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/B/5e7920d340894ae086eed46220b72ba5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e7920d340894ae086eed46220b72ba5 2024-12-10T03:35:23,294 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e7920d340894ae086eed46220b72ba5, entries=50, sequenceid=253, filesize=7.2 K 2024-12-10T03:35:23,295 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/.tmp/C/6e9203ec36f34a99adddb6d69de99146 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/6e9203ec36f34a99adddb6d69de99146 2024-12-10T03:35:23,297 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/6e9203ec36f34a99adddb6d69de99146, entries=50, sequenceid=253, filesize=7.2 K 2024-12-10T03:35:23,298 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 419a7ce5f33ca468fbba7b43cce2b2ec in 1684ms, sequenceid=253, compaction requested=true 2024-12-10T03:35:23,299 DEBUG [StoreCloser-TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/9a79605016144c2ab6b11fce8cbeac7c, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/e00673c7b87d4d6f831e9575f8016252, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/06b04e9886f947a6aa3b89104e1e8e8a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/83ec9228e58240459eafdb4b02df3ac2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/7fbc2c2cd7534874a41176a7d7cb2d37, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82719cbba18b4a419eb87c72e03c1692, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/0440367f0d6041479a6a33cf7f00ac1a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/72ba69291d3747e19e386ee56eb001c7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/b7d89f6c74484e8ca61137e9cc2c615c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82c76864cf604a0ba0bd25aa98684271, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/1dbad0b74797472fae11d05bd83bb580, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/cee9eccb012942a8b7c6c09e3639de26] to archive 2024-12-10T03:35:23,299 DEBUG [StoreCloser-TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:35:23,301 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/e00673c7b87d4d6f831e9575f8016252 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/e00673c7b87d4d6f831e9575f8016252 2024-12-10T03:35:23,301 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/9a79605016144c2ab6b11fce8cbeac7c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/9a79605016144c2ab6b11fce8cbeac7c 2024-12-10T03:35:23,301 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/83ec9228e58240459eafdb4b02df3ac2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/83ec9228e58240459eafdb4b02df3ac2 2024-12-10T03:35:23,301 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/72ba69291d3747e19e386ee56eb001c7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/72ba69291d3747e19e386ee56eb001c7 2024-12-10T03:35:23,301 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/06b04e9886f947a6aa3b89104e1e8e8a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/06b04e9886f947a6aa3b89104e1e8e8a 2024-12-10T03:35:23,301 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82719cbba18b4a419eb87c72e03c1692 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82719cbba18b4a419eb87c72e03c1692 2024-12-10T03:35:23,301 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/0440367f0d6041479a6a33cf7f00ac1a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/0440367f0d6041479a6a33cf7f00ac1a 2024-12-10T03:35:23,301 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/7fbc2c2cd7534874a41176a7d7cb2d37 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/7fbc2c2cd7534874a41176a7d7cb2d37 2024-12-10T03:35:23,302 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/b7d89f6c74484e8ca61137e9cc2c615c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/b7d89f6c74484e8ca61137e9cc2c615c 2024-12-10T03:35:23,302 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/cee9eccb012942a8b7c6c09e3639de26 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/cee9eccb012942a8b7c6c09e3639de26 2024-12-10T03:35:23,302 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/1dbad0b74797472fae11d05bd83bb580 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/1dbad0b74797472fae11d05bd83bb580 2024-12-10T03:35:23,302 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82c76864cf604a0ba0bd25aa98684271 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/82c76864cf604a0ba0bd25aa98684271 2024-12-10T03:35:23,303 DEBUG [StoreCloser-TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/cde87086430a446fa2bd872c02ab15fc, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/ad38f1b601ac44a8b664b96db1e1573e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/2fd39895f7604440a1f88c3f2aed0a28, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6f8d23263ff54593a3e12306082b5c7d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/0fbc3777ddce44afa8b4f215cb1d547c, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7107648a428943fc94f8fd5ec2f95c9b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/4264c4e0ae7f4258873a9ce32aee8d88, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/3aeb6139135a47d8889eda2b0fa6ffbf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7706439d09284e539520e48667274406, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6e4c5877b4bb4da4b532c74cfdd0e488, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/704e5b97a2a74730a41467bc35828750, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5f3febfb183b49d6ac8ca0b2b266d023] to archive 2024-12-10T03:35:23,304 DEBUG [StoreCloser-TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:35:23,305 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/ad38f1b601ac44a8b664b96db1e1573e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/ad38f1b601ac44a8b664b96db1e1573e 2024-12-10T03:35:23,305 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/cde87086430a446fa2bd872c02ab15fc to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/cde87086430a446fa2bd872c02ab15fc 2024-12-10T03:35:23,305 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/2fd39895f7604440a1f88c3f2aed0a28 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/2fd39895f7604440a1f88c3f2aed0a28 2024-12-10T03:35:23,305 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/0fbc3777ddce44afa8b4f215cb1d547c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/0fbc3777ddce44afa8b4f215cb1d547c 2024-12-10T03:35:23,305 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6f8d23263ff54593a3e12306082b5c7d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6f8d23263ff54593a3e12306082b5c7d 2024-12-10T03:35:23,305 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/4264c4e0ae7f4258873a9ce32aee8d88 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/4264c4e0ae7f4258873a9ce32aee8d88 2024-12-10T03:35:23,305 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7107648a428943fc94f8fd5ec2f95c9b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7107648a428943fc94f8fd5ec2f95c9b 2024-12-10T03:35:23,306 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/3aeb6139135a47d8889eda2b0fa6ffbf to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/3aeb6139135a47d8889eda2b0fa6ffbf 2024-12-10T03:35:23,306 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7706439d09284e539520e48667274406 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/7706439d09284e539520e48667274406 2024-12-10T03:35:23,306 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6e4c5877b4bb4da4b532c74cfdd0e488 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/6e4c5877b4bb4da4b532c74cfdd0e488 2024-12-10T03:35:23,306 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5f3febfb183b49d6ac8ca0b2b266d023 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5f3febfb183b49d6ac8ca0b2b266d023 2024-12-10T03:35:23,306 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/704e5b97a2a74730a41467bc35828750 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/704e5b97a2a74730a41467bc35828750 2024-12-10T03:35:23,307 DEBUG [StoreCloser-TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/7d334b7092eb4701a4f240b04721e3a4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/57935f2ffecf4719bf37c0ca12b3b2f3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/536566f1738245dfbc56711b2ae43e29, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d7d7ed0104ad4657a8a7d1812d89da7a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/97c869aff45d4b2e8ebb643902ebd648, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/748e2f0322c54bbd850ab92e3e139042, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c2081d0139042f98d2c799c76f342e6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/949ec78329444e4fb7412c8196a175e3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0aba99f8b1714834ae7184b0f6a04f15, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d6d7e91767664887b678ec29eebae0fd, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2236bccd1e074bed98f8b88d3d8dd321] to archive 2024-12-10T03:35:23,307 DEBUG [StoreCloser-TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:35:23,309 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/7d334b7092eb4701a4f240b04721e3a4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/7d334b7092eb4701a4f240b04721e3a4 2024-12-10T03:35:23,309 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0bcd48ffe5bd4d6e9a591367f9b5bdb1 2024-12-10T03:35:23,309 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d7d7ed0104ad4657a8a7d1812d89da7a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d7d7ed0104ad4657a8a7d1812d89da7a 2024-12-10T03:35:23,309 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/57935f2ffecf4719bf37c0ca12b3b2f3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/57935f2ffecf4719bf37c0ca12b3b2f3 2024-12-10T03:35:23,309 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c2081d0139042f98d2c799c76f342e6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c2081d0139042f98d2c799c76f342e6 2024-12-10T03:35:23,309 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/97c869aff45d4b2e8ebb643902ebd648 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/97c869aff45d4b2e8ebb643902ebd648 2024-12-10T03:35:23,309 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/748e2f0322c54bbd850ab92e3e139042 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/748e2f0322c54bbd850ab92e3e139042 2024-12-10T03:35:23,309 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/536566f1738245dfbc56711b2ae43e29 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/536566f1738245dfbc56711b2ae43e29 2024-12-10T03:35:23,310 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0aba99f8b1714834ae7184b0f6a04f15 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/0aba99f8b1714834ae7184b0f6a04f15 2024-12-10T03:35:23,310 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d6d7e91767664887b678ec29eebae0fd to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/d6d7e91767664887b678ec29eebae0fd 2024-12-10T03:35:23,310 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/949ec78329444e4fb7412c8196a175e3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/949ec78329444e4fb7412c8196a175e3 2024-12-10T03:35:23,310 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2236bccd1e074bed98f8b88d3d8dd321 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2236bccd1e074bed98f8b88d3d8dd321 2024-12-10T03:35:23,313 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/recovered.edits/256.seqid, newMaxSeqId=256, maxSeqId=4 2024-12-10T03:35:23,313 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec. 
2024-12-10T03:35:23,313 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1635): Region close journal for 419a7ce5f33ca468fbba7b43cce2b2ec: 2024-12-10T03:35:23,314 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(170): Closed 419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:23,315 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=419a7ce5f33ca468fbba7b43cce2b2ec, regionState=CLOSED 2024-12-10T03:35:23,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-10T03:35:23,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; CloseRegionProcedure 419a7ce5f33ca468fbba7b43cce2b2ec, server=50b9ef1c5472,37553,1733801610862 in 3.2530 sec 2024-12-10T03:35:23,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-12-10T03:35:23,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=419a7ce5f33ca468fbba7b43cce2b2ec, UNASSIGN in 3.2560 sec 2024-12-10T03:35:23,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-10T03:35:23,318 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 3.2590 sec 2024-12-10T03:35:23,319 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801723319"}]},"ts":"1733801723319"} 2024-12-10T03:35:23,320 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T03:35:23,366 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T03:35:23,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 3.3280 sec 2024-12-10T03:35:24,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T03:35:24,152 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-10T03:35:24,153 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T03:35:24,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:24,157 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=114, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:24,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T03:35:24,159 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=114, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:24,162 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,166 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/recovered.edits] 2024-12-10T03:35:24,171 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/013364ce4870476e895f3994940af737 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/013364ce4870476e895f3994940af737 2024-12-10T03:35:24,171 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/343dbe7b1d244ceb9b22578fbeb1f156 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/343dbe7b1d244ceb9b22578fbeb1f156 2024-12-10T03:35:24,171 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/07968a7c599e4a16918b22d1cf7de6ea to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/07968a7c599e4a16918b22d1cf7de6ea 2024-12-10T03:35:24,171 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/402bdbc22e4f4dc5a31307fcf16c965d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/402bdbc22e4f4dc5a31307fcf16c965d 2024-12-10T03:35:24,172 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/d63f2aefd69647b7a2141c508e2d63c4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/A/d63f2aefd69647b7a2141c508e2d63c4 
2024-12-10T03:35:24,178 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/36f47637ec2f4cde89c4a27ca07ae6f6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/36f47637ec2f4cde89c4a27ca07ae6f6 2024-12-10T03:35:24,178 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e7920d340894ae086eed46220b72ba5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e7920d340894ae086eed46220b72ba5 2024-12-10T03:35:24,178 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/c312f33de954445b891e925aa33c22f8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/c312f33de954445b891e925aa33c22f8 2024-12-10T03:35:24,178 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e995798d51949b4813d0c9020873a42 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/5e995798d51949b4813d0c9020873a42 2024-12-10T03:35:24,178 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/dc12535d1b894ae5a6cefa0786f40829 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/B/dc12535d1b894ae5a6cefa0786f40829 2024-12-10T03:35:24,184 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/04244fe89c1643dcb11a441f71387e34 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/04244fe89c1643dcb11a441f71387e34 2024-12-10T03:35:24,184 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c724ba1c0f04930bd76036d0e4e1cfd to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/2c724ba1c0f04930bd76036d0e4e1cfd 2024-12-10T03:35:24,185 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/6e9203ec36f34a99adddb6d69de99146 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/6e9203ec36f34a99adddb6d69de99146 2024-12-10T03:35:24,185 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/f10062400b8a495197a086ee64fe2d06 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/f10062400b8a495197a086ee64fe2d06 2024-12-10T03:35:24,185 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/984261dab72f4119be74bbead5b05fef to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/C/984261dab72f4119be74bbead5b05fef 2024-12-10T03:35:24,189 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/recovered.edits/256.seqid to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec/recovered.edits/256.seqid 2024-12-10T03:35:24,190 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,190 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T03:35:24,190 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T03:35:24,192 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T03:35:24,202 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101e878b2320ed47068cd6352f24b303d4_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101e878b2320ed47068cd6352f24b303d4_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,202 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210231361c777de4f059abaeb86a6998d9c_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210231361c777de4f059abaeb86a6998d9c_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,202 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103ab256930d004eb3b42dd421ec9d8660_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103ab256930d004eb3b42dd421ec9d8660_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,202 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101438842b27a54d829f0354ae9ecc4cd2_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101438842b27a54d829f0354ae9ecc4cd2_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,202 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121053d07536ba9f4c5993c2e745da8d610a_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121053d07536ba9f4c5993c2e745da8d610a_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,203 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068f1428cc34f4f3ba8bc41112cbc0e89_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068f1428cc34f4f3ba8bc41112cbc0e89_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,203 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121071a409ee91a94d889894008e2c323987_419a7ce5f33ca468fbba7b43cce2b2ec to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121071a409ee91a94d889894008e2c323987_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,203 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108dc68046d16644509feee2b869199cab_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108dc68046d16644509feee2b869199cab_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,204 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121091e13c63c3f54c74851bd732bca970d6_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121091e13c63c3f54c74851bd732bca970d6_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,204 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ba174f4a70664c76986cd2aa1c99ccba_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ba174f4a70664c76986cd2aa1c99ccba_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,204 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210facf3ce802d44641a3dd2e1076767975_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210facf3ce802d44641a3dd2e1076767975_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,205 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d527eed9628b4dada1f84eb543930b19_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d527eed9628b4dada1f84eb543930b19_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,205 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210cecce4860aac441e82570b4700aa57e2_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210cecce4860aac441e82570b4700aa57e2_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,205 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fbcacca8eafe4c89aefa8ddcc867e281_419a7ce5f33ca468fbba7b43cce2b2ec to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fbcacca8eafe4c89aefa8ddcc867e281_419a7ce5f33ca468fbba7b43cce2b2ec 2024-12-10T03:35:24,205 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T03:35:24,208 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=114, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:24,210 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T03:35:24,212 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T03:35:24,213 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=114, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:24,213 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T03:35:24,213 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733801724213"}]},"ts":"9223372036854775807"} 2024-12-10T03:35:24,214 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T03:35:24,214 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 419a7ce5f33ca468fbba7b43cce2b2ec, NAME => 'TestAcidGuarantees,,1733801695391.419a7ce5f33ca468fbba7b43cce2b2ec.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T03:35:24,214 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T03:35:24,214 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733801724214"}]},"ts":"9223372036854775807"} 2024-12-10T03:35:24,215 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T03:35:24,226 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=114, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:24,227 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 72 msec 2024-12-10T03:35:24,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T03:35:24,261 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-10T03:35:24,274 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245 (was 247), OpenFileDescriptor=452 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=268 (was 281), ProcessCount=11 (was 11), AvailableMemoryMB=3301 (was 3320) 2024-12-10T03:35:24,283 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=245, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=268, ProcessCount=11, AvailableMemoryMB=3300 2024-12-10T03:35:24,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-10T03:35:24,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:35:24,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:24,286 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T03:35:24,287 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:24,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 115 2024-12-10T03:35:24,287 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T03:35:24,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T03:35:24,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742216_1392 (size=963) 2024-12-10T03:35:24,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T03:35:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T03:35:24,694 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:35:24,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742217_1393 (size=53) 2024-12-10T03:35:24,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T03:35:25,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:35:25,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b427273a9970e4bd4ba7fd36e4602947, disabling compactions & flushes 2024-12-10T03:35:25,101 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:25,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:25,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. after waiting 0 ms 2024-12-10T03:35:25,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:25,101 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:25,101 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:25,103 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T03:35:25,104 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733801725103"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733801725103"}]},"ts":"1733801725103"} 2024-12-10T03:35:25,105 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T03:35:25,106 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T03:35:25,106 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801725106"}]},"ts":"1733801725106"} 2024-12-10T03:35:25,108 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T03:35:25,150 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b427273a9970e4bd4ba7fd36e4602947, ASSIGN}] 2024-12-10T03:35:25,152 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b427273a9970e4bd4ba7fd36e4602947, ASSIGN 2024-12-10T03:35:25,153 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b427273a9970e4bd4ba7fd36e4602947, ASSIGN; state=OFFLINE, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=false 2024-12-10T03:35:25,304 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=b427273a9970e4bd4ba7fd36e4602947, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:25,307 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; OpenRegionProcedure b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:35:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T03:35:25,460 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:25,466 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:25,467 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7285): Opening region: {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:35:25,468 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,468 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:35:25,468 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7327): checking encryption for b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,468 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7330): checking classloading for b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,471 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,473 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:25,474 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b427273a9970e4bd4ba7fd36e4602947 columnFamilyName A 2024-12-10T03:35:25,474 DEBUG [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:25,474 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.HStore(327): Store=b427273a9970e4bd4ba7fd36e4602947/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:25,475 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,476 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:25,476 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b427273a9970e4bd4ba7fd36e4602947 columnFamilyName B 2024-12-10T03:35:25,477 DEBUG [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:25,477 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.HStore(327): Store=b427273a9970e4bd4ba7fd36e4602947/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:25,477 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,479 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:25,479 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b427273a9970e4bd4ba7fd36e4602947 columnFamilyName C 2024-12-10T03:35:25,479 DEBUG [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:25,480 INFO [StoreOpener-b427273a9970e4bd4ba7fd36e4602947-1 {}] regionserver.HStore(327): Store=b427273a9970e4bd4ba7fd36e4602947/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:25,480 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:25,480 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,481 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,482 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:35:25,482 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1085): writing seq id for b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:25,484 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:35:25,484 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1102): Opened b427273a9970e4bd4ba7fd36e4602947; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59603816, jitterRate=-0.11183393001556396}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:35:25,485 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1001): Region open journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:25,485 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., pid=117, masterSystemTime=1733801725460 2024-12-10T03:35:25,486 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:25,486 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:25,487 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=b427273a9970e4bd4ba7fd36e4602947, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:25,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-10T03:35:25,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; OpenRegionProcedure b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 in 181 msec 2024-12-10T03:35:25,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-10T03:35:25,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b427273a9970e4bd4ba7fd36e4602947, ASSIGN in 339 msec 2024-12-10T03:35:25,490 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T03:35:25,490 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801725490"}]},"ts":"1733801725490"} 2024-12-10T03:35:25,491 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T03:35:25,504 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=115, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T03:35:25,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2190 sec 2024-12-10T03:35:26,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-10T03:35:26,398 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 115 completed 2024-12-10T03:35:26,401 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x695c2253 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63cefe40 2024-12-10T03:35:26,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32c12a30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,422 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,425 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55766, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,427 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T03:35:26,428 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49056, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T03:35:26,431 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7177efc9 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65df2359 2024-12-10T03:35:26,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef40578, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,443 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61d38088 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d0ab200 2024-12-10T03:35:26,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32bb71c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,452 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7043f683 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5871c039 2024-12-10T03:35:26,459 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc0f7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,460 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b0c2472 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7daa5922 2024-12-10T03:35:26,468 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8b6e04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,468 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34b30c39 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b7f20c4 2024-12-10T03:35:26,476 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc486e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,476 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-12-10T03:35:26,484 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,485 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-12-10T03:35:26,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,493 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2423f3 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dd48863 2024-12-10T03:35:26,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a917b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,501 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x184771cf to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51196534 2024-12-10T03:35:26,509 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,510 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x076f0408 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc5e114 2024-12-10T03:35:26,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d49886, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:26,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:26,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-10T03:35:26,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T03:35:26,521 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:26,521 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:26,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:26,525 DEBUG [hconnection-0x5c4b00b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,526 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55776, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,527 DEBUG [hconnection-0x2c237261-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,528 DEBUG [hconnection-0x2f25bae5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,528 DEBUG [hconnection-0x3f6afc04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,528 DEBUG [hconnection-0x30594e64-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,528 DEBUG [hconnection-0x39ac812d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,528 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,528 DEBUG [hconnection-0x6eec81ef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,529 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,529 DEBUG [hconnection-0x245929b0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,529 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,529 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55826, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,529 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55810, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,529 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,529 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,530 DEBUG [hconnection-0x5fef319e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,531 DEBUG [hconnection-0x42232129-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:26,531 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55856, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,532 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:26,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:26,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T03:35:26,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:26,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:26,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:26,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:26,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:26,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:26,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801786541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801786541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801786542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801786542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801786542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/e97962c9fe204bec91870620e04372c5 is 50, key is test_row_0/A:col10/1733801726534/Put/seqid=0 2024-12-10T03:35:26,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742218_1394 (size=12001) 2024-12-10T03:35:26,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T03:35:26,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801786643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801786643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801786643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801786643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801786643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:26,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:26,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:26,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:26,672 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T03:35:26,824 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:26,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:26,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:26,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:26,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801786847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801786847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801786847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801786847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:26,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801786848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:26,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/e97962c9fe204bec91870620e04372c5 2024-12-10T03:35:26,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:26,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:26,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:26,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:26,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:26,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/0aded5c1340f4dafae9c2affec5eb238 is 50, key is test_row_0/B:col10/1733801726534/Put/seqid=0 2024-12-10T03:35:27,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742219_1395 (size=12001) 2024-12-10T03:35:27,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T03:35:27,128 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:27,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:27,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:27,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801787152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801787152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801787153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801787153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801787153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,280 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:27,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:27,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,401 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/0aded5c1340f4dafae9c2affec5eb238 2024-12-10T03:35:27,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/4a7a492374d94d71a3590593f33ba582 is 50, key is test_row_0/C:col10/1733801726534/Put/seqid=0 2024-12-10T03:35:27,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742220_1396 (size=12001) 2024-12-10T03:35:27,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:27,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
as already flushing 2024-12-10T03:35:27,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,584 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:27,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:27,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T03:35:27,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801787655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801787655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801787655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801787655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:27,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801787657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,737 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:27,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:27,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:27,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:27,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/4a7a492374d94d71a3590593f33ba582 2024-12-10T03:35:27,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/e97962c9fe204bec91870620e04372c5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e97962c9fe204bec91870620e04372c5 2024-12-10T03:35:27,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e97962c9fe204bec91870620e04372c5, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T03:35:27,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/0aded5c1340f4dafae9c2affec5eb238 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0aded5c1340f4dafae9c2affec5eb238 2024-12-10T03:35:27,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0aded5c1340f4dafae9c2affec5eb238, entries=150, sequenceid=16, 
filesize=11.7 K 2024-12-10T03:35:27,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/4a7a492374d94d71a3590593f33ba582 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/4a7a492374d94d71a3590593f33ba582 2024-12-10T03:35:27,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/4a7a492374d94d71a3590593f33ba582, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T03:35:27,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for b427273a9970e4bd4ba7fd36e4602947 in 1299ms, sequenceid=16, compaction requested=false 2024-12-10T03:35:27,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:27,889 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:27,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-10T03:35:27,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:27,890 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T03:35:27,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:27,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:27,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:27,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:27,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:27,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:27,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/a12597f4238a44f9ad4a317326ce0f99 is 50, key is test_row_0/A:col10/1733801726540/Put/seqid=0 2024-12-10T03:35:27,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742221_1397 (size=12001) 2024-12-10T03:35:27,896 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/a12597f4238a44f9ad4a317326ce0f99 2024-12-10T03:35:27,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/b2d44bb527d24becadd7389971c811e5 is 50, key is test_row_0/B:col10/1733801726540/Put/seqid=0 2024-12-10T03:35:27,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742222_1398 (size=12001) 2024-12-10T03:35:28,306 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/b2d44bb527d24becadd7389971c811e5 2024-12-10T03:35:28,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/f73a5969b5494114afb7e9fc4c84def2 is 50, key is test_row_0/C:col10/1733801726540/Put/seqid=0 2024-12-10T03:35:28,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742223_1399 (size=12001) 2024-12-10T03:35:28,314 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/f73a5969b5494114afb7e9fc4c84def2 2024-12-10T03:35:28,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/a12597f4238a44f9ad4a317326ce0f99 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/a12597f4238a44f9ad4a317326ce0f99 2024-12-10T03:35:28,324 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/a12597f4238a44f9ad4a317326ce0f99, entries=150, sequenceid=38, filesize=11.7 K 2024-12-10T03:35:28,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/b2d44bb527d24becadd7389971c811e5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/b2d44bb527d24becadd7389971c811e5 2024-12-10T03:35:28,329 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/b2d44bb527d24becadd7389971c811e5, entries=150, sequenceid=38, filesize=11.7 K 2024-12-10T03:35:28,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/f73a5969b5494114afb7e9fc4c84def2 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/f73a5969b5494114afb7e9fc4c84def2 2024-12-10T03:35:28,334 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/f73a5969b5494114afb7e9fc4c84def2, entries=150, sequenceid=38, filesize=11.7 K 2024-12-10T03:35:28,334 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for b427273a9970e4bd4ba7fd36e4602947 in 444ms, sequenceid=38, compaction requested=false 2024-12-10T03:35:28,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:28,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:28,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-10T03:35:28,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-10T03:35:28,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-10T03:35:28,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8140 sec 2024-12-10T03:35:28,337 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.8160 sec 2024-12-10T03:35:28,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T03:35:28,624 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-10T03:35:28,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:28,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-10T03:35:28,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-10T03:35:28,626 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:28,626 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:28,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:28,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:28,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:35:28,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:28,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:28,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:28,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:28,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:28,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:28,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/e1f3a07718c84437bf5063df5cbb0c72 is 50, key is test_row_0/A:col10/1733801728670/Put/seqid=0 2024-12-10T03:35:28,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742224_1400 (size=14341) 2024-12-10T03:35:28,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/e1f3a07718c84437bf5063df5cbb0c72 2024-12-10T03:35:28,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/4284a9eafb7c4d4e9b5dccb2f4506ac3 is 50, key is test_row_0/B:col10/1733801728670/Put/seqid=0 2024-12-10T03:35:28,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742225_1401 (size=12001) 2024-12-10T03:35:28,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/4284a9eafb7c4d4e9b5dccb2f4506ac3 2024-12-10T03:35:28,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/8464516a74d74576aa082e649c43df1c is 50, key is test_row_0/C:col10/1733801728670/Put/seqid=0 2024-12-10T03:35:28,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742226_1402 (size=12001) 2024-12-10T03:35:28,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801788694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801788694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801788695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801788695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801788699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-10T03:35:28,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-10T03:35:28,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:28,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:28,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:28,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:28,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:28,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:28,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801788800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801788802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801788802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801788803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:28,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801788806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,833 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T03:35:28,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-10T03:35:28,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:28,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-10T03:35:28,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:28,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:28,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:28,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:28,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:28,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:29,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801789005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801789008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801789008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801789009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801789011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,082 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-10T03:35:29,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:29,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:29,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:29,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:29,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:29,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:29,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/8464516a74d74576aa082e649c43df1c 2024-12-10T03:35:29,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/e1f3a07718c84437bf5063df5cbb0c72 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e1f3a07718c84437bf5063df5cbb0c72 2024-12-10T03:35:29,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e1f3a07718c84437bf5063df5cbb0c72, entries=200, sequenceid=49, filesize=14.0 K 2024-12-10T03:35:29,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/4284a9eafb7c4d4e9b5dccb2f4506ac3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/4284a9eafb7c4d4e9b5dccb2f4506ac3 2024-12-10T03:35:29,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/4284a9eafb7c4d4e9b5dccb2f4506ac3, entries=150, sequenceid=49, filesize=11.7 K 2024-12-10T03:35:29,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/8464516a74d74576aa082e649c43df1c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8464516a74d74576aa082e649c43df1c 2024-12-10T03:35:29,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8464516a74d74576aa082e649c43df1c, entries=150, sequenceid=49, filesize=11.7 K 2024-12-10T03:35:29,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b427273a9970e4bd4ba7fd36e4602947 in 481ms, sequenceid=49, compaction requested=true 2024-12-10T03:35:29,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:29,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:29,152 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:29,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:29,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:29,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:29,152 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:29,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:29,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:29,152 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:29,152 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:29,152 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/A is initiating minor compaction (all files) 2024-12-10T03:35:29,152 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/B is initiating minor compaction (all files) 2024-12-10T03:35:29,152 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/B in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:29,152 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/A in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:29,153 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0aded5c1340f4dafae9c2affec5eb238, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/b2d44bb527d24becadd7389971c811e5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/4284a9eafb7c4d4e9b5dccb2f4506ac3] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=35.2 K 2024-12-10T03:35:29,153 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e97962c9fe204bec91870620e04372c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/a12597f4238a44f9ad4a317326ce0f99, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e1f3a07718c84437bf5063df5cbb0c72] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=37.4 K 2024-12-10T03:35:29,153 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 0aded5c1340f4dafae9c2affec5eb238, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801726532 2024-12-10T03:35:29,153 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting e97962c9fe204bec91870620e04372c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801726532 2024-12-10T03:35:29,153 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting b2d44bb527d24becadd7389971c811e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733801726540 2024-12-10T03:35:29,153 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a12597f4238a44f9ad4a317326ce0f99, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733801726540 2024-12-10T03:35:29,153 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting e1f3a07718c84437bf5063df5cbb0c72, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801728669 2024-12-10T03:35:29,153 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4284a9eafb7c4d4e9b5dccb2f4506ac3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801728669 2024-12-10T03:35:29,159 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#A#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:29,159 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#B#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:29,160 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/848031288f0546dfb3e0d579cad96cac is 50, key is test_row_0/A:col10/1733801728670/Put/seqid=0 2024-12-10T03:35:29,160 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/8475629027d3420b8cc7ac26f37885d6 is 50, key is test_row_0/B:col10/1733801728670/Put/seqid=0 2024-12-10T03:35:29,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742227_1403 (size=12104) 2024-12-10T03:35:29,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742228_1404 (size=12104) 2024-12-10T03:35:29,169 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/8475629027d3420b8cc7ac26f37885d6 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8475629027d3420b8cc7ac26f37885d6 2024-12-10T03:35:29,173 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/B of b427273a9970e4bd4ba7fd36e4602947 into 8475629027d3420b8cc7ac26f37885d6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:29,173 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:29,173 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/B, priority=13, startTime=1733801729152; duration=0sec 2024-12-10T03:35:29,173 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:29,173 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:B 2024-12-10T03:35:29,173 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:29,174 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:29,174 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/C is initiating minor compaction (all files) 2024-12-10T03:35:29,174 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/C in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:29,174 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/4a7a492374d94d71a3590593f33ba582, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/f73a5969b5494114afb7e9fc4c84def2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8464516a74d74576aa082e649c43df1c] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=35.2 K 2024-12-10T03:35:29,174 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a7a492374d94d71a3590593f33ba582, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801726532 2024-12-10T03:35:29,174 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f73a5969b5494114afb7e9fc4c84def2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733801726540 2024-12-10T03:35:29,175 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 8464516a74d74576aa082e649c43df1c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801728669 2024-12-10T03:35:29,179 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b427273a9970e4bd4ba7fd36e4602947#C#compaction#338 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:29,179 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/249464a1e27a416a9fd13851bc598e3b is 50, key is test_row_0/C:col10/1733801728670/Put/seqid=0 2024-12-10T03:35:29,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742229_1405 (size=12104) 2024-12-10T03:35:29,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-10T03:35:29,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-10T03:35:29,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:29,235 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:35:29,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:29,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:29,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:29,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:29,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:29,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:29,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/983180dc9a7f4850a6d0c74043bead87 is 50, key is test_row_0/A:col10/1733801728695/Put/seqid=0 2024-12-10T03:35:29,241 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742230_1406 (size=12001) 2024-12-10T03:35:29,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:29,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:29,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801789317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801789317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801789318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801789320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801789321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,372 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-10T03:35:29,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801789422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801789422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801789423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801789425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801789425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,568 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/848031288f0546dfb3e0d579cad96cac as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/848031288f0546dfb3e0d579cad96cac 2024-12-10T03:35:29,572 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/A of b427273a9970e4bd4ba7fd36e4602947 into 848031288f0546dfb3e0d579cad96cac(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:29,572 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:29,572 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/A, priority=13, startTime=1733801729151; duration=0sec 2024-12-10T03:35:29,572 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:29,572 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:A 2024-12-10T03:35:29,586 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/249464a1e27a416a9fd13851bc598e3b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/249464a1e27a416a9fd13851bc598e3b 2024-12-10T03:35:29,590 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/C of b427273a9970e4bd4ba7fd36e4602947 into 249464a1e27a416a9fd13851bc598e3b(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:29,590 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:29,590 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/C, priority=13, startTime=1733801729152; duration=0sec 2024-12-10T03:35:29,590 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:29,590 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:C 2024-12-10T03:35:29,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801789627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801789627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801789627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801789627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,632 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801789629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,642 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/983180dc9a7f4850a6d0c74043bead87 2024-12-10T03:35:29,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/60f1b4cf6c88469a9c3277183c0ec468 is 50, key is test_row_0/B:col10/1733801728695/Put/seqid=0 2024-12-10T03:35:29,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742231_1407 (size=12001) 2024-12-10T03:35:29,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-10T03:35:29,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801789929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801789931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801789931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801789933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:29,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:29,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801789934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:30,054 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/60f1b4cf6c88469a9c3277183c0ec468 2024-12-10T03:35:30,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/8d292c594c694bab9cb9ca84342db432 is 50, key is test_row_0/C:col10/1733801728695/Put/seqid=0 2024-12-10T03:35:30,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742232_1408 (size=12001) 2024-12-10T03:35:30,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801790433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:30,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801790434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:30,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:30,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801790437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:30,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801790438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:30,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801790439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:30,486 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/8d292c594c694bab9cb9ca84342db432 2024-12-10T03:35:30,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/983180dc9a7f4850a6d0c74043bead87 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/983180dc9a7f4850a6d0c74043bead87 2024-12-10T03:35:30,492 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/983180dc9a7f4850a6d0c74043bead87, entries=150, sequenceid=75, filesize=11.7 K 2024-12-10T03:35:30,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/60f1b4cf6c88469a9c3277183c0ec468 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60f1b4cf6c88469a9c3277183c0ec468 2024-12-10T03:35:30,496 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60f1b4cf6c88469a9c3277183c0ec468, entries=150, sequenceid=75, filesize=11.7 K 2024-12-10T03:35:30,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/8d292c594c694bab9cb9ca84342db432 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8d292c594c694bab9cb9ca84342db432 2024-12-10T03:35:30,499 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8d292c594c694bab9cb9ca84342db432, entries=150, sequenceid=75, filesize=11.7 K 2024-12-10T03:35:30,499 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b427273a9970e4bd4ba7fd36e4602947 in 1265ms, sequenceid=75, compaction requested=false 2024-12-10T03:35:30,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:30,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:30,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-10T03:35:30,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-10T03:35:30,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-10T03:35:30,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8740 sec 2024-12-10T03:35:30,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.8760 sec 2024-12-10T03:35:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-10T03:35:30,729 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-10T03:35:30,730 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:30,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-10T03:35:30,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T03:35:30,731 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:30,731 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:30,731 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:30,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T03:35:30,882 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:30,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-10T03:35:30,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:30,883 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:35:30,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:30,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:30,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:30,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:30,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:30,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/84c7a984ac3649d28b11c272be1a8039 is 50, key is test_row_0/A:col10/1733801729320/Put/seqid=0 2024-12-10T03:35:30,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742233_1409 (size=12001) 2024-12-10T03:35:31,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T03:35:31,291 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/84c7a984ac3649d28b11c272be1a8039 2024-12-10T03:35:31,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/d0e152c6bb6045e2be44e304e79a5727 is 50, key is test_row_0/B:col10/1733801729320/Put/seqid=0 2024-12-10T03:35:31,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742234_1410 (size=12001) 2024-12-10T03:35:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T03:35:31,448 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:31,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:31,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801791466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801791467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801791468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801791469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801791469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801791571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801791571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801791572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801791575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801791575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,700 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/d0e152c6bb6045e2be44e304e79a5727 2024-12-10T03:35:31,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a4e3f4f8098f4375b8de1386c227ce54 is 50, key is test_row_0/C:col10/1733801729320/Put/seqid=0 2024-12-10T03:35:31,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742235_1411 (size=12001) 2024-12-10T03:35:31,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801791775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801791776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801791776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801791779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,782 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:31,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801791780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:31,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T03:35:32,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801792078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801792079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801792080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801792083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801792085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,109 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a4e3f4f8098f4375b8de1386c227ce54 2024-12-10T03:35:32,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/84c7a984ac3649d28b11c272be1a8039 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/84c7a984ac3649d28b11c272be1a8039 2024-12-10T03:35:32,118 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/84c7a984ac3649d28b11c272be1a8039, entries=150, sequenceid=88, filesize=11.7 K 2024-12-10T03:35:32,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/d0e152c6bb6045e2be44e304e79a5727 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/d0e152c6bb6045e2be44e304e79a5727 2024-12-10T03:35:32,123 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/d0e152c6bb6045e2be44e304e79a5727, entries=150, sequenceid=88, filesize=11.7 K 2024-12-10T03:35:32,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a4e3f4f8098f4375b8de1386c227ce54 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a4e3f4f8098f4375b8de1386c227ce54 2024-12-10T03:35:32,126 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a4e3f4f8098f4375b8de1386c227ce54, entries=150, sequenceid=88, filesize=11.7 K 2024-12-10T03:35:32,127 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b427273a9970e4bd4ba7fd36e4602947 in 1244ms, sequenceid=88, compaction requested=true 2024-12-10T03:35:32,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:32,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
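At this point the flush has committed the A, B, and C store files from .tmp into the store (entries=150, sequenceid=88, about 11.7 K each) and the region's memstore has dropped to roughly 147.60 KB, back under the 512.0 K blocking limit that produced the RegionTooBusyException rejections above. In HBase that blocking limit is the memstore flush size multiplied by the block multiplier. The hypothetical sketch below only illustrates that relationship; the 128 KB flush size is an assumed value chosen so that 128 KB * 4 equals the 512 K limit seen in this log, not a setting read from the test harness.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Size (in bytes) at which a region's memstore is flushed to disk.
        // 128 KB is an assumed value for illustration only.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Multiplier applied to the flush size; once the memstore exceeds
        // flush.size * multiplier, puts are rejected with RegionTooBusyException.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Prints 524288, i.e. the 512.0 K limit reported in the log above.
        System.out.println("Blocking memstore limit (bytes): " + blockingLimit);
    }
}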
2024-12-10T03:35:32,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-10T03:35:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-10T03:35:32,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-10T03:35:32,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3970 sec 2024-12-10T03:35:32,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.3990 sec 2024-12-10T03:35:32,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:32,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T03:35:32,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:32,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:32,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:32,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:32,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:32,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:32,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/119a71f483f449fc8f5abda62d01f89b is 50, key is test_row_0/A:col10/1733801731468/Put/seqid=0 2024-12-10T03:35:32,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742236_1412 (size=16681) 2024-12-10T03:35:32,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801792592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801792593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801792593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801792597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801792598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801792699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801792699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801792700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801792702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801792702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T03:35:32,834 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-10T03:35:32,835 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:32,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-10T03:35:32,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T03:35:32,836 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:32,837 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:32,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:32,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801792901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801792901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801792904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801792904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:32,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801792904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T03:35:32,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:32,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T03:35:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:32,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:32,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:32,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:32,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:32,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/119a71f483f449fc8f5abda62d01f89b 2024-12-10T03:35:32,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/29f90ad026e4429ea7be854f9b57c8b7 is 50, key is test_row_0/B:col10/1733801731468/Put/seqid=0 2024-12-10T03:35:33,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742237_1413 (size=12001) 2024-12-10T03:35:33,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/29f90ad026e4429ea7be854f9b57c8b7 2024-12-10T03:35:33,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/89eabd3380b4442e9b7f641fe129d655 is 50, key is test_row_0/C:col10/1733801731468/Put/seqid=0 2024-12-10T03:35:33,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742238_1414 (size=12001) 2024-12-10T03:35:33,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/89eabd3380b4442e9b7f641fe129d655 2024-12-10T03:35:33,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/119a71f483f449fc8f5abda62d01f89b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/119a71f483f449fc8f5abda62d01f89b 2024-12-10T03:35:33,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/119a71f483f449fc8f5abda62d01f89b, entries=250, sequenceid=115, filesize=16.3 K 2024-12-10T03:35:33,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/29f90ad026e4429ea7be854f9b57c8b7 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/29f90ad026e4429ea7be854f9b57c8b7 2024-12-10T03:35:33,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/29f90ad026e4429ea7be854f9b57c8b7, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T03:35:33,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/89eabd3380b4442e9b7f641fe129d655 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/89eabd3380b4442e9b7f641fe129d655 2024-12-10T03:35:33,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/89eabd3380b4442e9b7f641fe129d655, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T03:35:33,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for b427273a9970e4bd4ba7fd36e4602947 in 440ms, sequenceid=115, compaction requested=true 2024-12-10T03:35:33,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:33,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:33,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:33,026 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:35:33,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:33,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:33,026 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:35:33,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:33,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:33,027 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 52787 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:35:33,027 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:35:33,027 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/B is initiating minor compaction (all files) 2024-12-10T03:35:33,027 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/A is initiating minor compaction (all files) 2024-12-10T03:35:33,027 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/B in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:33,027 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/A in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:33,028 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8475629027d3420b8cc7ac26f37885d6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60f1b4cf6c88469a9c3277183c0ec468, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/d0e152c6bb6045e2be44e304e79a5727, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/29f90ad026e4429ea7be854f9b57c8b7] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=47.0 K 2024-12-10T03:35:33,028 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/848031288f0546dfb3e0d579cad96cac, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/983180dc9a7f4850a6d0c74043bead87, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/84c7a984ac3649d28b11c272be1a8039, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/119a71f483f449fc8f5abda62d01f89b] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=51.5 K 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 8475629027d3420b8cc7ac26f37885d6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, 
earliestPutTs=1733801728669 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 848031288f0546dfb3e0d579cad96cac, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801728669 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 60f1b4cf6c88469a9c3277183c0ec468, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733801728694 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 983180dc9a7f4850a6d0c74043bead87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733801728694 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d0e152c6bb6045e2be44e304e79a5727, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733801729317 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84c7a984ac3649d28b11c272be1a8039, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733801729317 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 29f90ad026e4429ea7be854f9b57c8b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733801731465 2024-12-10T03:35:33,028 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 119a71f483f449fc8f5abda62d01f89b, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733801731461 2024-12-10T03:35:33,034 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#A#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:33,034 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/861e5c25e5a7435a93d1d996ad3d7606 is 50, key is test_row_0/A:col10/1733801731468/Put/seqid=0 2024-12-10T03:35:33,035 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#B#compaction#349 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:33,035 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/7e4711e511bc4f48a97f6b7fe5fd06d0 is 50, key is test_row_0/B:col10/1733801731468/Put/seqid=0 2024-12-10T03:35:33,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742239_1415 (size=12241) 2024-12-10T03:35:33,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742240_1416 (size=12241) 2024-12-10T03:35:33,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T03:35:33,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T03:35:33,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:33,141 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-10T03:35:33,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:33,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:33,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:33,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:33,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:33,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:33,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ea7b4e3707ed4e5dbbb6750b3a831f09 is 50, key is 
test_row_0/A:col10/1733801732597/Put/seqid=0 2024-12-10T03:35:33,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742241_1417 (size=9657) 2024-12-10T03:35:33,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:33,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:33,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801793230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801793230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801793232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801793232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801793233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801793333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801793335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801793337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801793337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801793343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T03:35:33,441 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/861e5c25e5a7435a93d1d996ad3d7606 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/861e5c25e5a7435a93d1d996ad3d7606 2024-12-10T03:35:33,441 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/7e4711e511bc4f48a97f6b7fe5fd06d0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/7e4711e511bc4f48a97f6b7fe5fd06d0 2024-12-10T03:35:33,445 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/A of b427273a9970e4bd4ba7fd36e4602947 into 861e5c25e5a7435a93d1d996ad3d7606(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:33,445 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:33,445 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/A, priority=12, startTime=1733801733026; duration=0sec 2024-12-10T03:35:33,445 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:33,445 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:A 2024-12-10T03:35:33,445 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:35:33,445 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/B of b427273a9970e4bd4ba7fd36e4602947 into 7e4711e511bc4f48a97f6b7fe5fd06d0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:33,445 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:33,445 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/B, priority=12, startTime=1733801733026; duration=0sec 2024-12-10T03:35:33,445 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:33,445 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:B 2024-12-10T03:35:33,446 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:35:33,446 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/C is initiating minor compaction (all files) 2024-12-10T03:35:33,446 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/C in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:33,446 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/249464a1e27a416a9fd13851bc598e3b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8d292c594c694bab9cb9ca84342db432, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a4e3f4f8098f4375b8de1386c227ce54, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/89eabd3380b4442e9b7f641fe129d655] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=47.0 K 2024-12-10T03:35:33,446 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 249464a1e27a416a9fd13851bc598e3b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733801728669 2024-12-10T03:35:33,447 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d292c594c694bab9cb9ca84342db432, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733801728694 2024-12-10T03:35:33,447 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4e3f4f8098f4375b8de1386c227ce54, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733801729317 2024-12-10T03:35:33,447 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89eabd3380b4442e9b7f641fe129d655, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733801731465 2024-12-10T03:35:33,456 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#C#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T03:35:33,456 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/7af951a10bc84bdba76566a5f5c01c4f is 50, key is test_row_0/C:col10/1733801731468/Put/seqid=0 2024-12-10T03:35:33,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742242_1418 (size=12241) 2024-12-10T03:35:33,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801793539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801793541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801793541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801793543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,548 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ea7b4e3707ed4e5dbbb6750b3a831f09 2024-12-10T03:35:33,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801793551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/3cd6433654484322ad7a07351fb3e8f0 is 50, key is test_row_0/B:col10/1733801732597/Put/seqid=0 2024-12-10T03:35:33,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742243_1419 (size=9657) 2024-12-10T03:35:33,610 DEBUG [regionserver/50b9ef1c5472:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.07692307692307693, tune throughput to 53.85 MB/second 2024-12-10T03:35:33,611 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/50b9ef1c5472:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e97962c9fe204bec91870620e04372c5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/a12597f4238a44f9ad4a317326ce0f99, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e1f3a07718c84437bf5063df5cbb0c72, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/848031288f0546dfb3e0d579cad96cac, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/983180dc9a7f4850a6d0c74043bead87, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/84c7a984ac3649d28b11c272be1a8039, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/119a71f483f449fc8f5abda62d01f89b] to archive 2024-12-10T03:35:33,612 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/50b9ef1c5472:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:35:33,613 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e97962c9fe204bec91870620e04372c5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e97962c9fe204bec91870620e04372c5 2024-12-10T03:35:33,613 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/848031288f0546dfb3e0d579cad96cac to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/848031288f0546dfb3e0d579cad96cac 2024-12-10T03:35:33,613 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/983180dc9a7f4850a6d0c74043bead87 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/983180dc9a7f4850a6d0c74043bead87 2024-12-10T03:35:33,614 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e1f3a07718c84437bf5063df5cbb0c72 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/e1f3a07718c84437bf5063df5cbb0c72 2024-12-10T03:35:33,614 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/119a71f483f449fc8f5abda62d01f89b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/119a71f483f449fc8f5abda62d01f89b 2024-12-10T03:35:33,614 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/a12597f4238a44f9ad4a317326ce0f99 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/a12597f4238a44f9ad4a317326ce0f99 2024-12-10T03:35:33,614 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/84c7a984ac3649d28b11c272be1a8039 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/84c7a984ac3649d28b11c272be1a8039 2024-12-10T03:35:33,615 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/50b9ef1c5472:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] 
regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0aded5c1340f4dafae9c2affec5eb238, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/b2d44bb527d24becadd7389971c811e5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8475629027d3420b8cc7ac26f37885d6, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/4284a9eafb7c4d4e9b5dccb2f4506ac3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60f1b4cf6c88469a9c3277183c0ec468, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/d0e152c6bb6045e2be44e304e79a5727, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/29f90ad026e4429ea7be854f9b57c8b7] to archive 2024-12-10T03:35:33,616 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/50b9ef1c5472:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:35:33,617 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8475629027d3420b8cc7ac26f37885d6 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8475629027d3420b8cc7ac26f37885d6 2024-12-10T03:35:33,617 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0aded5c1340f4dafae9c2affec5eb238 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0aded5c1340f4dafae9c2affec5eb238 2024-12-10T03:35:33,617 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/b2d44bb527d24becadd7389971c811e5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/b2d44bb527d24becadd7389971c811e5 2024-12-10T03:35:33,618 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/4284a9eafb7c4d4e9b5dccb2f4506ac3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/4284a9eafb7c4d4e9b5dccb2f4506ac3 2024-12-10T03:35:33,618 DEBUG [HFileArchiver-1 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/d0e152c6bb6045e2be44e304e79a5727 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/d0e152c6bb6045e2be44e304e79a5727 2024-12-10T03:35:33,618 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60f1b4cf6c88469a9c3277183c0ec468 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60f1b4cf6c88469a9c3277183c0ec468 2024-12-10T03:35:33,618 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/29f90ad026e4429ea7be854f9b57c8b7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/29f90ad026e4429ea7be854f9b57c8b7 2024-12-10T03:35:33,619 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/50b9ef1c5472:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/4a7a492374d94d71a3590593f33ba582, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/f73a5969b5494114afb7e9fc4c84def2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8464516a74d74576aa082e649c43df1c] to archive 2024-12-10T03:35:33,619 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/50b9ef1c5472:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:35:33,621 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/4a7a492374d94d71a3590593f33ba582 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/4a7a492374d94d71a3590593f33ba582 2024-12-10T03:35:33,621 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/f73a5969b5494114afb7e9fc4c84def2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/f73a5969b5494114afb7e9fc4c84def2 2024-12-10T03:35:33,621 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8464516a74d74576aa082e649c43df1c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8464516a74d74576aa082e649c43df1c 2024-12-10T03:35:33,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801793843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801793844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801793846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801793846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:33,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801793854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:33,863 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/7af951a10bc84bdba76566a5f5c01c4f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7af951a10bc84bdba76566a5f5c01c4f 2024-12-10T03:35:33,867 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/C of b427273a9970e4bd4ba7fd36e4602947 into 7af951a10bc84bdba76566a5f5c01c4f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:33,867 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:33,867 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/C, priority=12, startTime=1733801733026; duration=0sec 2024-12-10T03:35:33,868 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:33,868 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:C 2024-12-10T03:35:33,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T03:35:33,969 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/3cd6433654484322ad7a07351fb3e8f0 2024-12-10T03:35:33,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/2db5e9a81e534f16a4d02c4f9fce1d7f is 50, key is test_row_0/C:col10/1733801732597/Put/seqid=0 2024-12-10T03:35:33,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742244_1420 (size=9657) 2024-12-10T03:35:33,978 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/2db5e9a81e534f16a4d02c4f9fce1d7f 2024-12-10T03:35:33,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ea7b4e3707ed4e5dbbb6750b3a831f09 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ea7b4e3707ed4e5dbbb6750b3a831f09 2024-12-10T03:35:33,984 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ea7b4e3707ed4e5dbbb6750b3a831f09, entries=100, sequenceid=124, filesize=9.4 K 2024-12-10T03:35:33,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 
{event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/3cd6433654484322ad7a07351fb3e8f0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/3cd6433654484322ad7a07351fb3e8f0 2024-12-10T03:35:33,988 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/3cd6433654484322ad7a07351fb3e8f0, entries=100, sequenceid=124, filesize=9.4 K 2024-12-10T03:35:33,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/2db5e9a81e534f16a4d02c4f9fce1d7f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2db5e9a81e534f16a4d02c4f9fce1d7f 2024-12-10T03:35:33,993 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2db5e9a81e534f16a4d02c4f9fce1d7f, entries=100, sequenceid=124, filesize=9.4 K 2024-12-10T03:35:33,994 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for b427273a9970e4bd4ba7fd36e4602947 in 854ms, sequenceid=124, compaction requested=false 2024-12-10T03:35:33,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:33,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:33,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-10T03:35:33,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-10T03:35:33,996 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-10T03:35:33,996 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1580 sec 2024-12-10T03:35:33,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.1610 sec 2024-12-10T03:35:34,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:34,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-10T03:35:34,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:34,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:34,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:34,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:34,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:34,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:34,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801794355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801794355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801794355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801794358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801794358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ec6da52c87534ecea6c36cbc2db6d720 is 50, key is test_row_0/A:col10/1733801734349/Put/seqid=0 2024-12-10T03:35:34,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742245_1421 (size=14541) 2024-12-10T03:35:34,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801794458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801794459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801794459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801794461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801794662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801794663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801794663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801794663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ec6da52c87534ecea6c36cbc2db6d720 2024-12-10T03:35:34,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/eb78970f5c4743feb996f33770806929 is 50, key is test_row_0/B:col10/1733801734349/Put/seqid=0 2024-12-10T03:35:34,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742246_1422 (size=12151) 2024-12-10T03:35:34,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/eb78970f5c4743feb996f33770806929 2024-12-10T03:35:34,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/7c3630a4cb7d475082ec144ddcd5d6be is 50, key is test_row_0/C:col10/1733801734349/Put/seqid=0 2024-12-10T03:35:34,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742247_1423 (size=12151) 2024-12-10T03:35:34,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T03:35:34,939 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-10T03:35:34,940 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:34,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-10T03:35:34,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T03:35:34,941 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:34,942 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:34,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:34,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801794965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801794966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801794966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:34,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801794967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T03:35:35,093 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T03:35:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:35,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:35,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:35,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:35,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/7c3630a4cb7d475082ec144ddcd5d6be 2024-12-10T03:35:35,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ec6da52c87534ecea6c36cbc2db6d720 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ec6da52c87534ecea6c36cbc2db6d720 2024-12-10T03:35:35,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ec6da52c87534ecea6c36cbc2db6d720, entries=200, sequenceid=155, filesize=14.2 K 2024-12-10T03:35:35,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/eb78970f5c4743feb996f33770806929 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/eb78970f5c4743feb996f33770806929 2024-12-10T03:35:35,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/eb78970f5c4743feb996f33770806929, entries=150, sequenceid=155, filesize=11.9 K 2024-12-10T03:35:35,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/7c3630a4cb7d475082ec144ddcd5d6be as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7c3630a4cb7d475082ec144ddcd5d6be 2024-12-10T03:35:35,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7c3630a4cb7d475082ec144ddcd5d6be, entries=150, sequenceid=155, filesize=11.9 K 2024-12-10T03:35:35,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for b427273a9970e4bd4ba7fd36e4602947 in 882ms, sequenceid=155, compaction requested=true 2024-12-10T03:35:35,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:35,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
b427273a9970e4bd4ba7fd36e4602947:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:35,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:35,232 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:35,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:35,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:35,232 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:35,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:35,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36439 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/B is initiating minor compaction (all files) 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/A is initiating minor compaction (all files) 2024-12-10T03:35:35,233 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/A in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:35,233 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/B in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:35,233 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/861e5c25e5a7435a93d1d996ad3d7606, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ea7b4e3707ed4e5dbbb6750b3a831f09, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ec6da52c87534ecea6c36cbc2db6d720] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=35.6 K 2024-12-10T03:35:35,233 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/7e4711e511bc4f48a97f6b7fe5fd06d0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/3cd6433654484322ad7a07351fb3e8f0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/eb78970f5c4743feb996f33770806929] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=33.3 K 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 861e5c25e5a7435a93d1d996ad3d7606, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733801731465 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e4711e511bc4f48a97f6b7fe5fd06d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733801731465 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea7b4e3707ed4e5dbbb6750b3a831f09, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733801732593 2024-12-10T03:35:35,233 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cd6433654484322ad7a07351fb3e8f0, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1733801732593 2024-12-10T03:35:35,234 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec6da52c87534ecea6c36cbc2db6d720, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733801733231 2024-12-10T03:35:35,234 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting eb78970f5c4743feb996f33770806929, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733801733231 2024-12-10T03:35:35,239 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#A#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:35,239 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ae25fa6f7bed47f98775a15c56752622 is 50, key is test_row_0/A:col10/1733801734349/Put/seqid=0 2024-12-10T03:35:35,240 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#B#compaction#358 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:35,241 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/459f669715714e60ba888ba80805f8c7 is 50, key is test_row_0/B:col10/1733801734349/Put/seqid=0 2024-12-10T03:35:35,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T03:35:35,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742248_1424 (size=12254) 2024-12-10T03:35:35,245 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T03:35:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:35,246 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T03:35:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:35,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:35,249 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/ae25fa6f7bed47f98775a15c56752622 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ae25fa6f7bed47f98775a15c56752622 2024-12-10T03:35:35,253 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/A of b427273a9970e4bd4ba7fd36e4602947 into ae25fa6f7bed47f98775a15c56752622(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:35,253 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:35,253 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/A, priority=13, startTime=1733801735232; duration=0sec 2024-12-10T03:35:35,254 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:35,254 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:A 2024-12-10T03:35:35,254 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:35,255 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:35,255 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/C is initiating minor compaction (all files) 2024-12-10T03:35:35,255 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/C in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:35,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/953241278d54463887b1fcf862b799c2 is 50, key is test_row_1/A:col10/1733801734354/Put/seqid=0 2024-12-10T03:35:35,255 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7af951a10bc84bdba76566a5f5c01c4f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2db5e9a81e534f16a4d02c4f9fce1d7f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7c3630a4cb7d475082ec144ddcd5d6be] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=33.3 K 2024-12-10T03:35:35,256 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7af951a10bc84bdba76566a5f5c01c4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733801731465 2024-12-10T03:35:35,256 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2db5e9a81e534f16a4d02c4f9fce1d7f, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, 
seqNum=124, earliestPutTs=1733801732593 2024-12-10T03:35:35,256 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c3630a4cb7d475082ec144ddcd5d6be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733801733231 2024-12-10T03:35:35,273 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#C#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:35,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742250_1426 (size=9757) 2024-12-10T03:35:35,274 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/fe8e9b42562e46c69171a2d36240b134 is 50, key is test_row_0/C:col10/1733801734349/Put/seqid=0 2024-12-10T03:35:35,274 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/953241278d54463887b1fcf862b799c2 2024-12-10T03:35:35,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742249_1425 (size=12254) 2024-12-10T03:35:35,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742251_1427 (size=12391) 2024-12-10T03:35:35,281 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/459f669715714e60ba888ba80805f8c7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/459f669715714e60ba888ba80805f8c7 2024-12-10T03:35:35,284 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/B of b427273a9970e4bd4ba7fd36e4602947 into 459f669715714e60ba888ba80805f8c7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:35,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/52e9888636cd4446a6178e06e314a26a is 50, key is test_row_1/B:col10/1733801734354/Put/seqid=0 2024-12-10T03:35:35,284 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:35,284 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/B, priority=13, startTime=1733801735232; duration=0sec 2024-12-10T03:35:35,285 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:35,285 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:B 2024-12-10T03:35:35,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742252_1428 (size=9757) 2024-12-10T03:35:35,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:35,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:35,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801795471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801795472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801795472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801795477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801795477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T03:35:35,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801795582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801795584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,683 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/fe8e9b42562e46c69171a2d36240b134 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fe8e9b42562e46c69171a2d36240b134 2024-12-10T03:35:35,686 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/C of b427273a9970e4bd4ba7fd36e4602947 into fe8e9b42562e46c69171a2d36240b134(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:35,686 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:35,686 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/C, priority=13, startTime=1733801735232; duration=0sec 2024-12-10T03:35:35,686 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:35,686 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:C 2024-12-10T03:35:35,688 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/52e9888636cd4446a6178e06e314a26a 2024-12-10T03:35:35,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/5c35a93bb0214ba9ac39197371dafa20 is 50, key is test_row_1/C:col10/1733801734354/Put/seqid=0 2024-12-10T03:35:35,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742253_1429 (size=9757) 2024-12-10T03:35:35,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801795784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:35,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:35,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801795789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T03:35:36,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801796089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801796093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,096 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/5c35a93bb0214ba9ac39197371dafa20 2024-12-10T03:35:36,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/953241278d54463887b1fcf862b799c2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/953241278d54463887b1fcf862b799c2 2024-12-10T03:35:36,102 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/953241278d54463887b1fcf862b799c2, entries=100, sequenceid=163, filesize=9.5 K 2024-12-10T03:35:36,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/52e9888636cd4446a6178e06e314a26a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/52e9888636cd4446a6178e06e314a26a 2024-12-10T03:35:36,105 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/52e9888636cd4446a6178e06e314a26a, entries=100, sequenceid=163, filesize=9.5 K 2024-12-10T03:35:36,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/5c35a93bb0214ba9ac39197371dafa20 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5c35a93bb0214ba9ac39197371dafa20 2024-12-10T03:35:36,108 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5c35a93bb0214ba9ac39197371dafa20, entries=100, sequenceid=163, filesize=9.5 K 2024-12-10T03:35:36,109 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for b427273a9970e4bd4ba7fd36e4602947 in 862ms, sequenceid=163, compaction requested=false 2024-12-10T03:35:36,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:36,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:36,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-10T03:35:36,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-10T03:35:36,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-10T03:35:36,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1670 sec 2024-12-10T03:35:36,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.1700 sec 2024-12-10T03:35:36,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:36,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-10T03:35:36,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:36,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:36,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:36,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:36,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:36,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:36,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/f276b12ab96e475e9fcd93541099087f is 50, key is test_row_0/A:col10/1733801735465/Put/seqid=0 2024-12-10T03:35:36,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742254_1430 (size=12151) 2024-12-10T03:35:36,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801796487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801796488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801796488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801796591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801796591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801796593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801796594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801796597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801796796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801796797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801796798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:36,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/f276b12ab96e475e9fcd93541099087f 2024-12-10T03:35:36,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/60342431768a41e0bf687ff47bf80916 is 50, key is test_row_0/B:col10/1733801735465/Put/seqid=0 2024-12-10T03:35:36,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742255_1431 (size=12151) 2024-12-10T03:35:37,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T03:35:37,044 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-10T03:35:37,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:37,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-10T03:35:37,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 
2024-12-10T03:35:37,046 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:37,047 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:37,047 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:37,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801797099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801797100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801797100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T03:35:37,198 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T03:35:37,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/60342431768a41e0bf687ff47bf80916 2024-12-10T03:35:37,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/b7b21e8f533d46f8afd8ac93d5a7c30c is 50, key is test_row_0/C:col10/1733801735465/Put/seqid=0 2024-12-10T03:35:37,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742256_1432 (size=12151) 2024-12-10T03:35:37,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T03:35:37,350 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T03:35:37,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:37,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,502 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T03:35:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801797600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801797603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801797603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801797606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:37,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801797607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T03:35:37,655 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T03:35:37,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:37,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:37,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:37,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/b7b21e8f533d46f8afd8ac93d5a7c30c 2024-12-10T03:35:37,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/f276b12ab96e475e9fcd93541099087f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f276b12ab96e475e9fcd93541099087f 2024-12-10T03:35:37,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f276b12ab96e475e9fcd93541099087f, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T03:35:37,713 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/60342431768a41e0bf687ff47bf80916 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60342431768a41e0bf687ff47bf80916 2024-12-10T03:35:37,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60342431768a41e0bf687ff47bf80916, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T03:35:37,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/b7b21e8f533d46f8afd8ac93d5a7c30c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/b7b21e8f533d46f8afd8ac93d5a7c30c 2024-12-10T03:35:37,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/b7b21e8f533d46f8afd8ac93d5a7c30c, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T03:35:37,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for b427273a9970e4bd4ba7fd36e4602947 in 1247ms, sequenceid=196, compaction requested=true 2024-12-10T03:35:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
b427273a9970e4bd4ba7fd36e4602947:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:37,731 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:37,731 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:37,732 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34162 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:37,732 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34162 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:37,732 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/B is initiating minor compaction (all files) 2024-12-10T03:35:37,732 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/A is initiating minor compaction (all files) 2024-12-10T03:35:37,732 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/A in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:37,732 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/B in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:37,732 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/459f669715714e60ba888ba80805f8c7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/52e9888636cd4446a6178e06e314a26a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60342431768a41e0bf687ff47bf80916] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=33.4 K 2024-12-10T03:35:37,732 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ae25fa6f7bed47f98775a15c56752622, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/953241278d54463887b1fcf862b799c2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f276b12ab96e475e9fcd93541099087f] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=33.4 K 2024-12-10T03:35:37,732 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae25fa6f7bed47f98775a15c56752622, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733801733231 2024-12-10T03:35:37,732 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 459f669715714e60ba888ba80805f8c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733801733231 2024-12-10T03:35:37,733 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 52e9888636cd4446a6178e06e314a26a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801734354 2024-12-10T03:35:37,733 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 953241278d54463887b1fcf862b799c2, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801734354 2024-12-10T03:35:37,733 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting f276b12ab96e475e9fcd93541099087f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801735465 2024-12-10T03:35:37,733 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 60342431768a41e0bf687ff47bf80916, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801735465 2024-12-10T03:35:37,738 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#B#compaction#367 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:37,738 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#A#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:37,739 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/849baa3c037c46dd8056c48c7710f494 is 50, key is test_row_0/A:col10/1733801735465/Put/seqid=0 2024-12-10T03:35:37,739 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/242923957ab64caab0c8b69ccfabd882 is 50, key is test_row_0/B:col10/1733801735465/Put/seqid=0 2024-12-10T03:35:37,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742258_1434 (size=12357) 2024-12-10T03:35:37,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742257_1433 (size=12357) 2024-12-10T03:35:37,807 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:37,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T03:35:37,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:37,808 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-10T03:35:37,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:37,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:37,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:37,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:37,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:37,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:37,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/184785cc3d0846b0b9d1ad1abe7c2a46 is 50, key is test_row_0/A:col10/1733801736487/Put/seqid=0 2024-12-10T03:35:37,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742259_1435 (size=12151) 2024-12-10T03:35:38,148 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/242923957ab64caab0c8b69ccfabd882 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/242923957ab64caab0c8b69ccfabd882 2024-12-10T03:35:38,148 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/849baa3c037c46dd8056c48c7710f494 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/849baa3c037c46dd8056c48c7710f494 2024-12-10T03:35:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T03:35:38,152 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/B of b427273a9970e4bd4ba7fd36e4602947 into 
242923957ab64caab0c8b69ccfabd882(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:38,152 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/A of b427273a9970e4bd4ba7fd36e4602947 into 849baa3c037c46dd8056c48c7710f494(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:38,152 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:38,152 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:38,152 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/A, priority=13, startTime=1733801737731; duration=0sec 2024-12-10T03:35:38,152 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/B, priority=13, startTime=1733801737731; duration=0sec 2024-12-10T03:35:38,152 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:38,152 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:38,152 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:A 2024-12-10T03:35:38,152 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:B 2024-12-10T03:35:38,152 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:38,153 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:38,153 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/C is initiating minor compaction (all files) 2024-12-10T03:35:38,153 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/C in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:38,153 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fe8e9b42562e46c69171a2d36240b134, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5c35a93bb0214ba9ac39197371dafa20, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/b7b21e8f533d46f8afd8ac93d5a7c30c] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=33.5 K 2024-12-10T03:35:38,153 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe8e9b42562e46c69171a2d36240b134, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733801733231 2024-12-10T03:35:38,153 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c35a93bb0214ba9ac39197371dafa20, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1733801734354 2024-12-10T03:35:38,154 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7b21e8f533d46f8afd8ac93d5a7c30c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801735465 2024-12-10T03:35:38,159 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#C#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:38,159 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a485f8463e684014ada960fe7fee3f26 is 50, key is test_row_0/C:col10/1733801735465/Put/seqid=0 2024-12-10T03:35:38,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742260_1436 (size=12493) 2024-12-10T03:35:38,215 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/184785cc3d0846b0b9d1ad1abe7c2a46 2024-12-10T03:35:38,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/8568bf96f3134f09bda505ec32f45743 is 50, key is test_row_0/B:col10/1733801736487/Put/seqid=0 2024-12-10T03:35:38,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742261_1437 (size=12151) 2024-12-10T03:35:38,580 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a485f8463e684014ada960fe7fee3f26 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a485f8463e684014ada960fe7fee3f26 2024-12-10T03:35:38,584 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/C of b427273a9970e4bd4ba7fd36e4602947 into a485f8463e684014ada960fe7fee3f26(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:38,584 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:38,584 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/C, priority=13, startTime=1733801737731; duration=0sec 2024-12-10T03:35:38,584 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:38,584 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:C 2024-12-10T03:35:38,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:38,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:38,623 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/8568bf96f3134f09bda505ec32f45743 2024-12-10T03:35:38,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/fcedbdff77604eb58cddd1fb5d7b7828 is 50, key is test_row_0/C:col10/1733801736487/Put/seqid=0 2024-12-10T03:35:38,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742262_1438 (size=12151) 2024-12-10T03:35:38,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801798643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801798646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801798647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801798750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801798751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801798751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801798956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801798956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:38,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801798956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,031 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/fcedbdff77604eb58cddd1fb5d7b7828 2024-12-10T03:35:39,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/184785cc3d0846b0b9d1ad1abe7c2a46 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/184785cc3d0846b0b9d1ad1abe7c2a46 2024-12-10T03:35:39,038 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/184785cc3d0846b0b9d1ad1abe7c2a46, entries=150, sequenceid=202, filesize=11.9 K 2024-12-10T03:35:39,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/8568bf96f3134f09bda505ec32f45743 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8568bf96f3134f09bda505ec32f45743 2024-12-10T03:35:39,042 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8568bf96f3134f09bda505ec32f45743, entries=150, sequenceid=202, filesize=11.9 K 2024-12-10T03:35:39,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/fcedbdff77604eb58cddd1fb5d7b7828 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fcedbdff77604eb58cddd1fb5d7b7828 2024-12-10T03:35:39,046 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fcedbdff77604eb58cddd1fb5d7b7828, entries=150, sequenceid=202, filesize=11.9 K 2024-12-10T03:35:39,047 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for b427273a9970e4bd4ba7fd36e4602947 in 1239ms, sequenceid=202, compaction requested=false 2024-12-10T03:35:39,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:39,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-10T03:35:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-10T03:35:39,048 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-10T03:35:39,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0010 sec 2024-12-10T03:35:39,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.0040 sec 2024-12-10T03:35:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T03:35:39,149 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-10T03:35:39,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-10T03:35:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T03:35:39,151 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:39,152 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:39,152 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T03:35:39,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:39,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-10T03:35:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:39,265 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:39,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801799265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801799265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/85e6e77414274e3b85612db58a0d5316 is 50, key is test_row_0/A:col10/1733801738635/Put/seqid=0 2024-12-10T03:35:39,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801799266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742263_1439 (size=12151) 2024-12-10T03:35:39,303 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:39,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:39,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801799369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801799369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801799369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T03:35:39,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:39,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801799573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801799573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801799573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,607 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:39,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
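The repeated RegionTooBusyException warnings above come from HRegion.checkResources(), which rejects mutations while the region's memstore is over its blocking limit (reported here as 512.0 K). A minimal sketch of how that limit is conventionally derived, assuming the standard HBase configuration keys; the class name and the fallback defaults are illustrative and are not taken from this test's tuned-down settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fallback defaults below are illustrative; the test above clearly runs with a
        // much smaller flush size, since the log reports a 512.0 K blocking limit.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L); // 128 MB
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;
        // Mutations are rejected with RegionTooBusyException while the region's
        // memstore data size stays above this blocking limit.
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}

Writes are accepted again once a flush drains the memstore back under that limit, which is why the blocked Mutate calls in this log are interleaved with flush activity.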
2024-12-10T03:35:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
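On the client side, the RpcRetryingCallerImpl entries below ("tries=6, retries=16") show the HBase client retrying these RegionTooBusyException failures automatically. The snippet that follows is only an application-level sketch of the same back-off idea, not the client's actual mechanism: the class name RetryOnBusyRegion, the attempt count, and the back-off values are assumptions, while the table, row, and column names are taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_2"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    if (attempt == 5) {
                        throw e; // give up after the last attempt
                    }
                    // The region's memstore is over its blocking limit; wait for the
                    // flush to drain it, then try again with a longer pause.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}

In the test itself the AcidGuaranteesTestTool writer threads rely on the client's built-in retry behavior (visible in the stack traces below via RpcRetryingCallerImpl and HTable.put) rather than an explicit loop like this.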
2024-12-10T03:35:39,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801799609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,610 DEBUG [Thread-1779 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:35:39,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801799616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,620 DEBUG [Thread-1781 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:35:39,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=237 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/85e6e77414274e3b85612db58a0d5316 2024-12-10T03:35:39,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f17d4f55b71d43088ddd88cadbda49a9 is 50, key is test_row_0/B:col10/1733801738635/Put/seqid=0 2024-12-10T03:35:39,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742264_1440 (size=12151) 2024-12-10T03:35:39,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T03:35:39,759 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:39,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:39,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:39,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801799876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801799878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801799878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,911 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:39,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:39,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:39,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:39,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:39,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:39,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:39,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,063 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:40,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:40,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
as already flushing 2024-12-10T03:35:40,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f17d4f55b71d43088ddd88cadbda49a9 2024-12-10T03:35:40,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/e176a748372a4d7699b56dd49c0fa3bb is 50, key is test_row_0/C:col10/1733801738635/Put/seqid=0 2024-12-10T03:35:40,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742265_1441 (size=12151) 2024-12-10T03:35:40,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:40,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
as already flushing 2024-12-10T03:35:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T03:35:40,368 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:40,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:40,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:40,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:40,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:40,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801800382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:40,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:40,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801800383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:40,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:40,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801800384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:40,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/e176a748372a4d7699b56dd49c0fa3bb 2024-12-10T03:35:40,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/85e6e77414274e3b85612db58a0d5316 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/85e6e77414274e3b85612db58a0d5316 2024-12-10T03:35:40,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/85e6e77414274e3b85612db58a0d5316, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T03:35:40,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f17d4f55b71d43088ddd88cadbda49a9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f17d4f55b71d43088ddd88cadbda49a9 2024-12-10T03:35:40,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f17d4f55b71d43088ddd88cadbda49a9, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T03:35:40,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/e176a748372a4d7699b56dd49c0fa3bb as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e176a748372a4d7699b56dd49c0fa3bb 2024-12-10T03:35:40,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e176a748372a4d7699b56dd49c0fa3bb, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T03:35:40,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for b427273a9970e4bd4ba7fd36e4602947 in 1242ms, sequenceid=237, compaction requested=true 2024-12-10T03:35:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:40,506 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:40,506 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:40,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:40,507 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:40,507 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:40,507 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/B is initiating minor compaction (all files) 2024-12-10T03:35:40,507 DEBUG 
[RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/A is initiating minor compaction (all files) 2024-12-10T03:35:40,507 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/A in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,507 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/B in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,507 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/242923957ab64caab0c8b69ccfabd882, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8568bf96f3134f09bda505ec32f45743, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f17d4f55b71d43088ddd88cadbda49a9] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=35.8 K 2024-12-10T03:35:40,507 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/849baa3c037c46dd8056c48c7710f494, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/184785cc3d0846b0b9d1ad1abe7c2a46, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/85e6e77414274e3b85612db58a0d5316] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=35.8 K 2024-12-10T03:35:40,508 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 849baa3c037c46dd8056c48c7710f494, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801735465 2024-12-10T03:35:40,508 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 242923957ab64caab0c8b69ccfabd882, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801735465 2024-12-10T03:35:40,508 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 184785cc3d0846b0b9d1ad1abe7c2a46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733801736486 2024-12-10T03:35:40,508 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 8568bf96f3134f09bda505ec32f45743, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733801736486 2024-12-10T03:35:40,508 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
85e6e77414274e3b85612db58a0d5316, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801738635 2024-12-10T03:35:40,508 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f17d4f55b71d43088ddd88cadbda49a9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801738635 2024-12-10T03:35:40,515 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#A#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:40,516 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/62f227fede09428a931d48a284d968ba is 50, key is test_row_0/A:col10/1733801738635/Put/seqid=0 2024-12-10T03:35:40,520 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:40,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T03:35:40,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,520 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-10T03:35:40,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:40,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:40,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:40,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:40,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:40,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:40,528 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#B#compaction#376 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:40,529 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f77ce9d5a6444c6c8aaf274026df991a is 50, key is test_row_0/B:col10/1733801738635/Put/seqid=0 2024-12-10T03:35:40,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/46ab81a6d5804d0296ba20c8427457aa is 50, key is test_row_0/A:col10/1733801739265/Put/seqid=0 2024-12-10T03:35:40,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742266_1442 (size=12459) 2024-12-10T03:35:40,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742267_1443 (size=12459) 2024-12-10T03:35:40,540 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f77ce9d5a6444c6c8aaf274026df991a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f77ce9d5a6444c6c8aaf274026df991a 2024-12-10T03:35:40,543 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/B of b427273a9970e4bd4ba7fd36e4602947 into f77ce9d5a6444c6c8aaf274026df991a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:40,543 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:40,543 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/B, priority=13, startTime=1733801740506; duration=0sec 2024-12-10T03:35:40,543 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:40,543 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:B 2024-12-10T03:35:40,543 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:40,544 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:40,544 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/C is initiating minor compaction (all files) 2024-12-10T03:35:40,544 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/C in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:40,544 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a485f8463e684014ada960fe7fee3f26, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fcedbdff77604eb58cddd1fb5d7b7828, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e176a748372a4d7699b56dd49c0fa3bb] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=35.9 K 2024-12-10T03:35:40,544 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a485f8463e684014ada960fe7fee3f26, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733801735465 2024-12-10T03:35:40,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742268_1444 (size=7365) 2024-12-10T03:35:40,545 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting fcedbdff77604eb58cddd1fb5d7b7828, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1733801736486 2024-12-10T03:35:40,545 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting e176a748372a4d7699b56dd49c0fa3bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=237, earliestPutTs=1733801738635 2024-12-10T03:35:40,550 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#C#compaction#378 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:40,551 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/81715d4ff94a42d394c184020533ceae is 50, key is test_row_0/C:col10/1733801738635/Put/seqid=0 2024-12-10T03:35:40,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742269_1445 (size=12595) 2024-12-10T03:35:40,936 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/62f227fede09428a931d48a284d968ba as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/62f227fede09428a931d48a284d968ba 2024-12-10T03:35:40,939 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/A of b427273a9970e4bd4ba7fd36e4602947 into 62f227fede09428a931d48a284d968ba(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:40,940 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:40,940 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/A, priority=13, startTime=1733801740506; duration=0sec 2024-12-10T03:35:40,940 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:40,940 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:A 2024-12-10T03:35:40,945 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/46ab81a6d5804d0296ba20c8427457aa 2024-12-10T03:35:40,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/a17e3c431ea1433b9fffcc0b09ba71c9 is 50, key is test_row_0/B:col10/1733801739265/Put/seqid=0 2024-12-10T03:35:40,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742270_1446 (size=7365) 2024-12-10T03:35:40,957 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/81715d4ff94a42d394c184020533ceae as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/81715d4ff94a42d394c184020533ceae 2024-12-10T03:35:40,961 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/C of b427273a9970e4bd4ba7fd36e4602947 into 81715d4ff94a42d394c184020533ceae(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:40,961 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:40,961 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/C, priority=13, startTime=1733801740506; duration=0sec 2024-12-10T03:35:40,961 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:40,961 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:C 2024-12-10T03:35:41,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T03:35:41,354 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/a17e3c431ea1433b9fffcc0b09ba71c9 2024-12-10T03:35:41,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a284ab9f0acb47c0a302f7ef0c3aba2d is 50, key is test_row_0/C:col10/1733801739265/Put/seqid=0 2024-12-10T03:35:41,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742271_1447 (size=7365) 2024-12-10T03:35:41,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:41,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:41,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801801414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801801414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801801415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801801517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801801518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801801518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801801721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801801721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:41,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801801722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:41,764 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a284ab9f0acb47c0a302f7ef0c3aba2d 2024-12-10T03:35:41,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/46ab81a6d5804d0296ba20c8427457aa as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/46ab81a6d5804d0296ba20c8427457aa 2024-12-10T03:35:41,770 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/46ab81a6d5804d0296ba20c8427457aa, entries=50, sequenceid=241, filesize=7.2 K 2024-12-10T03:35:41,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/a17e3c431ea1433b9fffcc0b09ba71c9 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/a17e3c431ea1433b9fffcc0b09ba71c9 2024-12-10T03:35:41,779 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/a17e3c431ea1433b9fffcc0b09ba71c9, entries=50, sequenceid=241, filesize=7.2 K 2024-12-10T03:35:41,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a284ab9f0acb47c0a302f7ef0c3aba2d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a284ab9f0acb47c0a302f7ef0c3aba2d 2024-12-10T03:35:41,782 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a284ab9f0acb47c0a302f7ef0c3aba2d, entries=50, sequenceid=241, filesize=7.2 K 2024-12-10T03:35:41,783 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=194.56 KB/199230 for b427273a9970e4bd4ba7fd36e4602947 in 1263ms, sequenceid=241, compaction requested=false 2024-12-10T03:35:41,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:41,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:41,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-10T03:35:41,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-10T03:35:41,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-10T03:35:41,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6310 sec 2024-12-10T03:35:41,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.6350 sec 2024-12-10T03:35:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:42,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=201.27 KB heapSize=528.09 KB 2024-12-10T03:35:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:42,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801802026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:42,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:42,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/191ebfaf00734b21ace2e436bc9a3030 is 50, key is test_row_0/A:col10/1733801741401/Put/seqid=0 2024-12-10T03:35:42,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801802029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801802029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742272_1448 (size=12301) 2024-12-10T03:35:42,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801802134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801802134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801802338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801802338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/191ebfaf00734b21ace2e436bc9a3030 2024-12-10T03:35:42,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/dee0991e8a094c469a23ea308395ebba is 50, key is test_row_0/B:col10/1733801741401/Put/seqid=0 2024-12-10T03:35:42,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742273_1449 (size=12301) 2024-12-10T03:35:42,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801802534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801802642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:42,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801802643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:42,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/dee0991e8a094c469a23ea308395ebba 2024-12-10T03:35:42,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/ac0daf91dd70482e9a7561b2041f305c is 50, key is test_row_0/C:col10/1733801741401/Put/seqid=0 2024-12-10T03:35:42,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742274_1450 (size=12301) 2024-12-10T03:35:43,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801803148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801803150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/ac0daf91dd70482e9a7561b2041f305c 2024-12-10T03:35:43,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T03:35:43,256 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-10T03:35:43,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/191ebfaf00734b21ace2e436bc9a3030 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/191ebfaf00734b21ace2e436bc9a3030 2024-12-10T03:35:43,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-10T03:35:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T03:35:43,258 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:43,259 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:43,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:43,260 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/191ebfaf00734b21ace2e436bc9a3030, entries=150, sequenceid=278, filesize=12.0 K 2024-12-10T03:35:43,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/dee0991e8a094c469a23ea308395ebba as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/dee0991e8a094c469a23ea308395ebba 2024-12-10T03:35:43,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/dee0991e8a094c469a23ea308395ebba, entries=150, sequenceid=278, filesize=12.0 K 2024-12-10T03:35:43,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/ac0daf91dd70482e9a7561b2041f305c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/ac0daf91dd70482e9a7561b2041f305c 2024-12-10T03:35:43,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/ac0daf91dd70482e9a7561b2041f305c, entries=150, sequenceid=278, filesize=12.0 K 2024-12-10T03:35:43,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~207.98 KB/212970, heapSize ~545.63 KB/558720, currentSize=0 B/0 for b427273a9970e4bd4ba7fd36e4602947 in 1239ms, sequenceid=278, compaction requested=true 2024-12-10T03:35:43,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:43,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:43,267 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:43,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:43,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:43,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:43,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:C, priority=-2147483648, current under compaction store size 
is 3 2024-12-10T03:35:43,267 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:43,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:43,268 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32125 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:43,268 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32125 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:43,268 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/B is initiating minor compaction (all files) 2024-12-10T03:35:43,268 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/A is initiating minor compaction (all files) 2024-12-10T03:35:43,268 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/B in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,268 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/A in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,268 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/62f227fede09428a931d48a284d968ba, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/46ab81a6d5804d0296ba20c8427457aa, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/191ebfaf00734b21ace2e436bc9a3030] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=31.4 K 2024-12-10T03:35:43,268 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f77ce9d5a6444c6c8aaf274026df991a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/a17e3c431ea1433b9fffcc0b09ba71c9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/dee0991e8a094c469a23ea308395ebba] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=31.4 K 2024-12-10T03:35:43,268 DEBUG 
[RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f77ce9d5a6444c6c8aaf274026df991a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801738635 2024-12-10T03:35:43,269 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62f227fede09428a931d48a284d968ba, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801738635 2024-12-10T03:35:43,269 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a17e3c431ea1433b9fffcc0b09ba71c9, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733801739265 2024-12-10T03:35:43,269 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46ab81a6d5804d0296ba20c8427457aa, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733801739265 2024-12-10T03:35:43,269 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 191ebfaf00734b21ace2e436bc9a3030, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801741401 2024-12-10T03:35:43,269 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting dee0991e8a094c469a23ea308395ebba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801741401 2024-12-10T03:35:43,276 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#B#compaction#384 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:43,276 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/1330d2f8fbe645b8bce74268e6ea3e88 is 50, key is test_row_0/B:col10/1733801741401/Put/seqid=0 2024-12-10T03:35:43,278 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#A#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:43,279 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/f5b8c64e30ff41748f4d9d8294ab85ca is 50, key is test_row_0/A:col10/1733801741401/Put/seqid=0 2024-12-10T03:35:43,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742275_1451 (size=12711) 2024-12-10T03:35:43,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742276_1452 (size=12711) 2024-12-10T03:35:43,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T03:35:43,410 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T03:35:43,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:43,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:43,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-10T03:35:43,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-10T03:35:43,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-10T03:35:43,413 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 153 msec 2024-12-10T03:35:43,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 156 msec 2024-12-10T03:35:43,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T03:35:43,560 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-10T03:35:43,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:43,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:35:43,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:43,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:43,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:43,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:43,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:43,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:43,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:43,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-10T03:35:43,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T03:35:43,562 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:43,563 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:43,563 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:43,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/0b82e1d3d45d452dad0583f207feb832 is 50, key is test_row_0/A:col10/1733801743551/Put/seqid=0 2024-12-10T03:35:43,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742277_1453 (size=14741) 2024-12-10T03:35:43,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/0b82e1d3d45d452dad0583f207feb832 2024-12-10T03:35:43,574 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/9d618c64efdf467c811c3da1598d614c is 50, key is test_row_0/B:col10/1733801743551/Put/seqid=0 2024-12-10T03:35:43,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742278_1454 (size=12301) 2024-12-10T03:35:43,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55854 deadline: 1733801803647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801803647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,653 DEBUG [Thread-1781 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:35:43,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801803650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T03:35:43,693 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/1330d2f8fbe645b8bce74268e6ea3e88 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/1330d2f8fbe645b8bce74268e6ea3e88 2024-12-10T03:35:43,697 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/B of b427273a9970e4bd4ba7fd36e4602947 into 1330d2f8fbe645b8bce74268e6ea3e88(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:43,697 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:43,697 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/B, priority=13, startTime=1733801743267; duration=0sec 2024-12-10T03:35:43,698 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:43,698 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:B 2024-12-10T03:35:43,698 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:35:43,698 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 32261 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:35:43,698 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/C is initiating minor compaction (all files) 2024-12-10T03:35:43,698 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/C in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,699 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/81715d4ff94a42d394c184020533ceae, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a284ab9f0acb47c0a302f7ef0c3aba2d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/ac0daf91dd70482e9a7561b2041f305c] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=31.5 K 2024-12-10T03:35:43,700 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 81715d4ff94a42d394c184020533ceae, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733801738635 2024-12-10T03:35:43,700 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a284ab9f0acb47c0a302f7ef0c3aba2d, keycount=50, bloomtype=ROW, size=7.2 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733801739265 2024-12-10T03:35:43,701 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ac0daf91dd70482e9a7561b2041f305c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801741401 2024-12-10T03:35:43,701 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/f5b8c64e30ff41748f4d9d8294ab85ca as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f5b8c64e30ff41748f4d9d8294ab85ca 2024-12-10T03:35:43,705 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/A of b427273a9970e4bd4ba7fd36e4602947 into f5b8c64e30ff41748f4d9d8294ab85ca(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:43,705 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:43,705 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/A, priority=13, startTime=1733801743267; duration=0sec 2024-12-10T03:35:43,705 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:43,705 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:A 2024-12-10T03:35:43,706 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#C#compaction#388 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:43,707 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/e8f4824a7bcd4c24977b6e0990c6c2be is 50, key is test_row_0/C:col10/1733801741401/Put/seqid=0 2024-12-10T03:35:43,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742279_1455 (size=12847) 2024-12-10T03:35:43,714 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,714 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/e8f4824a7bcd4c24977b6e0990c6c2be as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e8f4824a7bcd4c24977b6e0990c6c2be 2024-12-10T03:35:43,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T03:35:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:43,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:43,718 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/C of b427273a9970e4bd4ba7fd36e4602947 into e8f4824a7bcd4c24977b6e0990c6c2be(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:43,718 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:43,719 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/C, priority=13, startTime=1733801743267; duration=0sec 2024-12-10T03:35:43,719 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:43,719 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:C 2024-12-10T03:35:43,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801803753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801803756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T03:35:43,866 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T03:35:43,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:43,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:43,867 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:43,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:43,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:43,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801803958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:43,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801803959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:43,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/9d618c64efdf467c811c3da1598d614c 2024-12-10T03:35:43,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/d14004a88c3b421a834e720e62db4027 is 50, key is test_row_0/C:col10/1733801743551/Put/seqid=0 2024-12-10T03:35:43,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742280_1456 (size=12301) 2024-12-10T03:35:44,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T03:35:44,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:44,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:44,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:44,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:44,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:44,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801804153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801804157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T03:35:44,171 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T03:35:44,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:44,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:44,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:44,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:44,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:44,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:44,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801804262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801804263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T03:35:44,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:44,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:44,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:44,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:44,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:44,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:44,386 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/d14004a88c3b421a834e720e62db4027 2024-12-10T03:35:44,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/0b82e1d3d45d452dad0583f207feb832 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/0b82e1d3d45d452dad0583f207feb832 2024-12-10T03:35:44,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/0b82e1d3d45d452dad0583f207feb832, entries=200, sequenceid=290, filesize=14.4 K 2024-12-10T03:35:44,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/9d618c64efdf467c811c3da1598d614c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/9d618c64efdf467c811c3da1598d614c 2024-12-10T03:35:44,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/9d618c64efdf467c811c3da1598d614c, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T03:35:44,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/d14004a88c3b421a834e720e62db4027 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/d14004a88c3b421a834e720e62db4027 2024-12-10T03:35:44,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/d14004a88c3b421a834e720e62db4027, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T03:35:44,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b427273a9970e4bd4ba7fd36e4602947 in 839ms, sequenceid=290, compaction requested=false 2024-12-10T03:35:44,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:44,475 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T03:35:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:44,476 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:35:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:44,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:44,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/b941eb7728284637a6df5eb599939148 is 50, key is test_row_0/A:col10/1733801743648/Put/seqid=0 2024-12-10T03:35:44,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742281_1457 (size=12301) 2024-12-10T03:35:44,619 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/b941eb7728284637a6df5eb599939148 2024-12-10T03:35:44,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/cde20e14a377426ab91d710627e098b1 is 50, key is test_row_0/B:col10/1733801743648/Put/seqid=0 2024-12-10T03:35:44,627 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742282_1458 (size=12301) 2024-12-10T03:35:44,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T03:35:44,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:44,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:44,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801804787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801804788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801804893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:44,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:44,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801804893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,028 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/cde20e14a377426ab91d710627e098b1 2024-12-10T03:35:45,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a87991db1011437dbdd7e283354f1caf is 50, key is test_row_0/C:col10/1733801743648/Put/seqid=0 2024-12-10T03:35:45,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742283_1459 (size=12301) 2024-12-10T03:35:45,099 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:45,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801805097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:45,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801805098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:45,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801805400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:45,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801805403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,437 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a87991db1011437dbdd7e283354f1caf 2024-12-10T03:35:45,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/b941eb7728284637a6df5eb599939148 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/b941eb7728284637a6df5eb599939148 2024-12-10T03:35:45,443 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/b941eb7728284637a6df5eb599939148, entries=150, sequenceid=318, filesize=12.0 K 2024-12-10T03:35:45,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/cde20e14a377426ab91d710627e098b1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/cde20e14a377426ab91d710627e098b1 2024-12-10T03:35:45,447 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/cde20e14a377426ab91d710627e098b1, entries=150, sequenceid=318, filesize=12.0 K 2024-12-10T03:35:45,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/a87991db1011437dbdd7e283354f1caf as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a87991db1011437dbdd7e283354f1caf 2024-12-10T03:35:45,451 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a87991db1011437dbdd7e283354f1caf, entries=150, sequenceid=318, filesize=12.0 K 2024-12-10T03:35:45,452 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for b427273a9970e4bd4ba7fd36e4602947 in 976ms, sequenceid=318, compaction requested=true 2024-12-10T03:35:45,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:45,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:45,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-10T03:35:45,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-10T03:35:45,454 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-10T03:35:45,454 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8900 sec 2024-12-10T03:35:45,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.8930 sec 2024-12-10T03:35:45,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T03:35:45,666 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-10T03:35:45,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-10T03:35:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T03:35:45,668 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:45,668 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:45,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T03:35:45,819 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T03:35:45,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:45,820 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:35:45,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:45,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:45,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:45,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:45,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:45,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:45,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/fc149b1bb2224319bef346d743d5a501 is 50, key is test_row_1/A:col10/1733801744782/Put/seqid=0 2024-12-10T03:35:45,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36539 is added to blk_1073742284_1460 (size=9857) 2024-12-10T03:35:45,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:45,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. as already flushing 2024-12-10T03:35:45,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:45,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801805958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:45,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801805959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:45,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T03:35:46,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801806064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801806066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55776 deadline: 1733801806162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,164 DEBUG [Thread-1775 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:35:46,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55790 deadline: 1733801806169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,170 DEBUG [Thread-1783 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., hostname=50b9ef1c5472,37553,1733801610862, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:35:46,226 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=329 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/fc149b1bb2224319bef346d743d5a501 2024-12-10T03:35:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f451f395f40e423e8ad83109e9ed4070 is 50, key is test_row_1/B:col10/1733801744782/Put/seqid=0 2024-12-10T03:35:46,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742285_1461 (size=9857) 2024-12-10T03:35:46,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T03:35:46,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801806267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801806271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,530 DEBUG [Thread-1794 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x076f0408 to 127.0.0.1:51621 2024-12-10T03:35:46,530 DEBUG [Thread-1794 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:46,532 DEBUG [Thread-1786 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:51621 2024-12-10T03:35:46,532 DEBUG [Thread-1786 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:46,533 DEBUG [Thread-1788 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:51621 2024-12-10T03:35:46,533 DEBUG [Thread-1788 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:46,534 DEBUG [Thread-1792 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x184771cf to 127.0.0.1:51621 2024-12-10T03:35:46,534 DEBUG [Thread-1792 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:46,537 DEBUG [Thread-1790 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2423f3 to 127.0.0.1:51621 2024-12-10T03:35:46,537 DEBUG [Thread-1790 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:46,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801806571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801806577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:46,635 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f451f395f40e423e8ad83109e9ed4070 2024-12-10T03:35:46,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/38c8752b46874263ae748a6be7593267 is 50, key is test_row_1/C:col10/1733801744782/Put/seqid=0 2024-12-10T03:35:46,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742286_1462 (size=9857) 2024-12-10T03:35:46,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T03:35:47,053 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/38c8752b46874263ae748a6be7593267 2024-12-10T03:35:47,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/fc149b1bb2224319bef346d743d5a501 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/fc149b1bb2224319bef346d743d5a501 2024-12-10T03:35:47,069 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/fc149b1bb2224319bef346d743d5a501, entries=100, sequenceid=329, filesize=9.6 K 2024-12-10T03:35:47,070 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/f451f395f40e423e8ad83109e9ed4070 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f451f395f40e423e8ad83109e9ed4070 2024-12-10T03:35:47,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:47,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55856 deadline: 1733801807072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:47,074 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f451f395f40e423e8ad83109e9ed4070, entries=100, sequenceid=329, filesize=9.6 K 2024-12-10T03:35:47,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/38c8752b46874263ae748a6be7593267 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/38c8752b46874263ae748a6be7593267 2024-12-10T03:35:47,078 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/38c8752b46874263ae748a6be7593267, entries=100, sequenceid=329, filesize=9.6 K 2024-12-10T03:35:47,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:47,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:55810 deadline: 1733801807078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:47,079 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b427273a9970e4bd4ba7fd36e4602947 in 1260ms, sequenceid=329, compaction requested=true 2024-12-10T03:35:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-10T03:35:47,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-10T03:35:47,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-10T03:35:47,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4120 sec 2024-12-10T03:35:47,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.4150 sec 2024-12-10T03:35:47,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T03:35:47,774 INFO [Thread-1785 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-10T03:35:48,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:48,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T03:35:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:48,084 DEBUG [Thread-1777 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61d38088 to 127.0.0.1:51621 2024-12-10T03:35:48,084 DEBUG [Thread-1777 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:48,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:48,089 DEBUG [Thread-1779 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7043f683 to 127.0.0.1:51621 2024-12-10T03:35:48,089 DEBUG [Thread-1779 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:48,092 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/c6f351f3e0b945c3a67bbb3c6bc6c684 is 50, key is test_row_0/A:col10/1733801748080/Put/seqid=0 2024-12-10T03:35:48,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742287_1463 (size=12301) 2024-12-10T03:35:48,497 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/c6f351f3e0b945c3a67bbb3c6bc6c684 2024-12-10T03:35:48,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/0a374d6c3ed746c9b466967e3a47202c is 50, key is test_row_0/B:col10/1733801748080/Put/seqid=0 2024-12-10T03:35:48,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742288_1464 (size=12301) 2024-12-10T03:35:48,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/0a374d6c3ed746c9b466967e3a47202c 2024-12-10T03:35:48,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/2bf31698ecd7494982cf3d21aa4bf29a is 50, key is test_row_0/C:col10/1733801748080/Put/seqid=0 2024-12-10T03:35:48,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742289_1465 (size=12301) 2024-12-10T03:35:49,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/2bf31698ecd7494982cf3d21aa4bf29a 2024-12-10T03:35:49,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/c6f351f3e0b945c3a67bbb3c6bc6c684 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/c6f351f3e0b945c3a67bbb3c6bc6c684 2024-12-10T03:35:49,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/c6f351f3e0b945c3a67bbb3c6bc6c684, entries=150, sequenceid=355, filesize=12.0 K 2024-12-10T03:35:49,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/0a374d6c3ed746c9b466967e3a47202c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0a374d6c3ed746c9b466967e3a47202c 2024-12-10T03:35:49,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0a374d6c3ed746c9b466967e3a47202c, entries=150, sequenceid=355, filesize=12.0 K 2024-12-10T03:35:49,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/2bf31698ecd7494982cf3d21aa4bf29a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2bf31698ecd7494982cf3d21aa4bf29a 2024-12-10T03:35:49,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2bf31698ecd7494982cf3d21aa4bf29a, entries=150, sequenceid=355, filesize=12.0 K 2024-12-10T03:35:49,359 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=6.71 KB/6870 for b427273a9970e4bd4ba7fd36e4602947 in 1276ms, sequenceid=355, compaction requested=true 2024-12-10T03:35:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:35:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:49,359 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:35:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:35:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:49,359 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:35:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b427273a9970e4bd4ba7fd36e4602947:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:35:49,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:49,361 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 59471 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:35:49,361 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 
files of size 61911 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:35:49,361 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/A is initiating minor compaction (all files) 2024-12-10T03:35:49,361 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/B is initiating minor compaction (all files) 2024-12-10T03:35:49,361 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/B in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:49,361 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/A in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:49,361 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f5b8c64e30ff41748f4d9d8294ab85ca, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/0b82e1d3d45d452dad0583f207feb832, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/b941eb7728284637a6df5eb599939148, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/fc149b1bb2224319bef346d743d5a501, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/c6f351f3e0b945c3a67bbb3c6bc6c684] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=60.5 K 2024-12-10T03:35:49,361 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/1330d2f8fbe645b8bce74268e6ea3e88, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/9d618c64efdf467c811c3da1598d614c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/cde20e14a377426ab91d710627e098b1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f451f395f40e423e8ad83109e9ed4070, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0a374d6c3ed746c9b466967e3a47202c] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=58.1 K 2024-12-10T03:35:49,361 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5b8c64e30ff41748f4d9d8294ab85ca, keycount=150, 
bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801741401 2024-12-10T03:35:49,361 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 1330d2f8fbe645b8bce74268e6ea3e88, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801741401 2024-12-10T03:35:49,362 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d618c64efdf467c811c3da1598d614c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733801743551 2024-12-10T03:35:49,362 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b82e1d3d45d452dad0583f207feb832, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733801743549 2024-12-10T03:35:49,362 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting cde20e14a377426ab91d710627e098b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733801743640 2024-12-10T03:35:49,362 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting b941eb7728284637a6df5eb599939148, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733801743640 2024-12-10T03:35:49,362 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f451f395f40e423e8ad83109e9ed4070, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733801744782 2024-12-10T03:35:49,362 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc149b1bb2224319bef346d743d5a501, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733801744782 2024-12-10T03:35:49,363 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a374d6c3ed746c9b466967e3a47202c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733801745950 2024-12-10T03:35:49,363 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6f351f3e0b945c3a67bbb3c6bc6c684, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733801745950 2024-12-10T03:35:49,371 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#A#compaction#400 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:49,371 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#B#compaction#399 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:49,371 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/18a5e38ba1784bb1a3044242e41d78b1 is 50, key is test_row_0/A:col10/1733801748080/Put/seqid=0 2024-12-10T03:35:49,371 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/42eee792f4a24ecab16269c4aa88198d is 50, key is test_row_0/B:col10/1733801748080/Put/seqid=0 2024-12-10T03:35:49,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742290_1466 (size=12881) 2024-12-10T03:35:49,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742291_1467 (size=12881) 2024-12-10T03:35:49,786 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/18a5e38ba1784bb1a3044242e41d78b1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/18a5e38ba1784bb1a3044242e41d78b1 2024-12-10T03:35:49,786 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/42eee792f4a24ecab16269c4aa88198d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/42eee792f4a24ecab16269c4aa88198d 2024-12-10T03:35:49,791 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/B of b427273a9970e4bd4ba7fd36e4602947 into 42eee792f4a24ecab16269c4aa88198d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:49,791 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/A of b427273a9970e4bd4ba7fd36e4602947 into 18a5e38ba1784bb1a3044242e41d78b1(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:35:49,791 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:49,791 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:49,791 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/A, priority=11, startTime=1733801749359; duration=0sec 2024-12-10T03:35:49,791 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/B, priority=11, startTime=1733801749359; duration=0sec 2024-12-10T03:35:49,792 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:35:49,792 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:A 2024-12-10T03:35:49,792 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:49,792 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:B 2024-12-10T03:35:49,792 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T03:35:49,793 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 59607 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T03:35:49,793 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): b427273a9970e4bd4ba7fd36e4602947/C is initiating minor compaction (all files) 2024-12-10T03:35:49,793 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b427273a9970e4bd4ba7fd36e4602947/C in TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:49,793 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e8f4824a7bcd4c24977b6e0990c6c2be, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/d14004a88c3b421a834e720e62db4027, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a87991db1011437dbdd7e283354f1caf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/38c8752b46874263ae748a6be7593267, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2bf31698ecd7494982cf3d21aa4bf29a] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp, totalSize=58.2 K 2024-12-10T03:35:49,794 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8f4824a7bcd4c24977b6e0990c6c2be, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733801741401 2024-12-10T03:35:49,794 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting d14004a88c3b421a834e720e62db4027, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733801743551 2024-12-10T03:35:49,795 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a87991db1011437dbdd7e283354f1caf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733801743640 2024-12-10T03:35:49,795 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38c8752b46874263ae748a6be7593267, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1733801744782 2024-12-10T03:35:49,795 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bf31698ecd7494982cf3d21aa4bf29a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733801745950 2024-12-10T03:35:49,807 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b427273a9970e4bd4ba7fd36e4602947#C#compaction#401 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:35:49,807 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/154317ff696c4828a8e3e414b3eb2293 is 50, key is test_row_0/C:col10/1733801748080/Put/seqid=0 2024-12-10T03:35:49,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742292_1468 (size=13017) 2024-12-10T03:35:49,817 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/154317ff696c4828a8e3e414b3eb2293 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/154317ff696c4828a8e3e414b3eb2293 2024-12-10T03:35:49,822 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in b427273a9970e4bd4ba7fd36e4602947/C of b427273a9970e4bd4ba7fd36e4602947 into 154317ff696c4828a8e3e414b3eb2293(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:35:49,822 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:49,822 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947., storeName=b427273a9970e4bd4ba7fd36e4602947/C, priority=11, startTime=1733801749359; duration=0sec 2024-12-10T03:35:49,822 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:35:49,822 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b427273a9970e4bd4ba7fd36e4602947:C 2024-12-10T03:35:50,195 DEBUG [Thread-1783 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34b30c39 to 127.0.0.1:51621 2024-12-10T03:35:50,195 DEBUG [Thread-1783 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:50,204 DEBUG [Thread-1775 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7177efc9 to 127.0.0.1:51621 2024-12-10T03:35:50,204 DEBUG [Thread-1775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:53,670 DEBUG [Thread-1781 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b0c2472 to 127.0.0.1:51621 2024-12-10T03:35:53,670 DEBUG [Thread-1781 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 4534 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 13602 rows 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 4513 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 13538 rows 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 4510 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 13530 rows 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 4535 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 13605 rows 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 4557 2024-12-10T03:35:53,671 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 13670 rows 2024-12-10T03:35:53,671 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T03:35:53,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x695c2253 to 127.0.0.1:51621 2024-12-10T03:35:53,671 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:35:53,673 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T03:35:53,673 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T03:35:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T03:35:53,677 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801753677"}]},"ts":"1733801753677"} 2024-12-10T03:35:53,678 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T03:35:53,725 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T03:35:53,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:35:53,728 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b427273a9970e4bd4ba7fd36e4602947, UNASSIGN}] 2024-12-10T03:35:53,729 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=140, ppid=139, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b427273a9970e4bd4ba7fd36e4602947, UNASSIGN 2024-12-10T03:35:53,731 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=b427273a9970e4bd4ba7fd36e4602947, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:53,732 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:35:53,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; CloseRegionProcedure b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:35:53,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T03:35:53,885 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:53,886 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(124): Close b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:53,886 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:35:53,886 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1681): Closing b427273a9970e4bd4ba7fd36e4602947, disabling compactions & flushes 2024-12-10T03:35:53,886 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:53,886 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:53,886 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. after waiting 0 ms 2024-12-10T03:35:53,886 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 
2024-12-10T03:35:53,887 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(2837): Flushing b427273a9970e4bd4ba7fd36e4602947 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T03:35:53,887 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=A 2024-12-10T03:35:53,887 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:53,887 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=B 2024-12-10T03:35:53,888 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:53,888 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b427273a9970e4bd4ba7fd36e4602947, store=C 2024-12-10T03:35:53,888 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:53,894 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/2151d10ca3354eb88b92ce60440f5f9d is 50, key is test_row_0/A:col10/1733801750194/Put/seqid=0 2024-12-10T03:35:53,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742293_1469 (size=12301) 2024-12-10T03:35:53,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T03:35:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T03:35:54,301 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/2151d10ca3354eb88b92ce60440f5f9d 2024-12-10T03:35:54,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/e3cd875e1c6f42889046957544bfe068 is 50, key is test_row_0/B:col10/1733801750194/Put/seqid=0 2024-12-10T03:35:54,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742294_1470 (size=12301) 2024-12-10T03:35:54,720 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 
{event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/e3cd875e1c6f42889046957544bfe068 2024-12-10T03:35:54,728 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/5ce4320757c04f46b1e8aab59fd92dd5 is 50, key is test_row_0/C:col10/1733801750194/Put/seqid=0 2024-12-10T03:35:54,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742295_1471 (size=12301) 2024-12-10T03:35:54,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T03:35:55,134 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/5ce4320757c04f46b1e8aab59fd92dd5 2024-12-10T03:35:55,143 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/A/2151d10ca3354eb88b92ce60440f5f9d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/2151d10ca3354eb88b92ce60440f5f9d 2024-12-10T03:35:55,148 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/2151d10ca3354eb88b92ce60440f5f9d, entries=150, sequenceid=365, filesize=12.0 K 2024-12-10T03:35:55,149 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/B/e3cd875e1c6f42889046957544bfe068 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/e3cd875e1c6f42889046957544bfe068 2024-12-10T03:35:55,152 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/e3cd875e1c6f42889046957544bfe068, entries=150, sequenceid=365, filesize=12.0 K 2024-12-10T03:35:55,153 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/.tmp/C/5ce4320757c04f46b1e8aab59fd92dd5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5ce4320757c04f46b1e8aab59fd92dd5 2024-12-10T03:35:55,156 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5ce4320757c04f46b1e8aab59fd92dd5, entries=150, sequenceid=365, filesize=12.0 K 2024-12-10T03:35:55,156 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for b427273a9970e4bd4ba7fd36e4602947 in 1270ms, sequenceid=365, compaction requested=false 2024-12-10T03:35:55,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/861e5c25e5a7435a93d1d996ad3d7606, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ea7b4e3707ed4e5dbbb6750b3a831f09, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ec6da52c87534ecea6c36cbc2db6d720, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ae25fa6f7bed47f98775a15c56752622, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/953241278d54463887b1fcf862b799c2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/849baa3c037c46dd8056c48c7710f494, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f276b12ab96e475e9fcd93541099087f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/184785cc3d0846b0b9d1ad1abe7c2a46, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/62f227fede09428a931d48a284d968ba, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/85e6e77414274e3b85612db58a0d5316, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/46ab81a6d5804d0296ba20c8427457aa, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f5b8c64e30ff41748f4d9d8294ab85ca, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/191ebfaf00734b21ace2e436bc9a3030, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/0b82e1d3d45d452dad0583f207feb832, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/b941eb7728284637a6df5eb599939148, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/fc149b1bb2224319bef346d743d5a501, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/c6f351f3e0b945c3a67bbb3c6bc6c684] to archive 2024-12-10T03:35:55,157 DEBUG [StoreCloser-TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:35:55,159 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/861e5c25e5a7435a93d1d996ad3d7606 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/861e5c25e5a7435a93d1d996ad3d7606 2024-12-10T03:35:55,159 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ea7b4e3707ed4e5dbbb6750b3a831f09 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ea7b4e3707ed4e5dbbb6750b3a831f09 2024-12-10T03:35:55,160 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/953241278d54463887b1fcf862b799c2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/953241278d54463887b1fcf862b799c2 2024-12-10T03:35:55,160 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ec6da52c87534ecea6c36cbc2db6d720 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ec6da52c87534ecea6c36cbc2db6d720 2024-12-10T03:35:55,160 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/184785cc3d0846b0b9d1ad1abe7c2a46 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/184785cc3d0846b0b9d1ad1abe7c2a46 
2024-12-10T03:35:55,160 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ae25fa6f7bed47f98775a15c56752622 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/ae25fa6f7bed47f98775a15c56752622 2024-12-10T03:35:55,160 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f276b12ab96e475e9fcd93541099087f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f276b12ab96e475e9fcd93541099087f 2024-12-10T03:35:55,160 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/849baa3c037c46dd8056c48c7710f494 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/849baa3c037c46dd8056c48c7710f494 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/46ab81a6d5804d0296ba20c8427457aa to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/46ab81a6d5804d0296ba20c8427457aa 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/85e6e77414274e3b85612db58a0d5316 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/85e6e77414274e3b85612db58a0d5316 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f5b8c64e30ff41748f4d9d8294ab85ca to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/f5b8c64e30ff41748f4d9d8294ab85ca 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/62f227fede09428a931d48a284d968ba to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/62f227fede09428a931d48a284d968ba 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/191ebfaf00734b21ace2e436bc9a3030 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/191ebfaf00734b21ace2e436bc9a3030 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/b941eb7728284637a6df5eb599939148 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/b941eb7728284637a6df5eb599939148 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/0b82e1d3d45d452dad0583f207feb832 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/0b82e1d3d45d452dad0583f207feb832 2024-12-10T03:35:55,162 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/fc149b1bb2224319bef346d743d5a501 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/fc149b1bb2224319bef346d743d5a501 2024-12-10T03:35:55,163 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/c6f351f3e0b945c3a67bbb3c6bc6c684 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/c6f351f3e0b945c3a67bbb3c6bc6c684 2024-12-10T03:35:55,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/7e4711e511bc4f48a97f6b7fe5fd06d0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/3cd6433654484322ad7a07351fb3e8f0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/459f669715714e60ba888ba80805f8c7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/eb78970f5c4743feb996f33770806929, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/52e9888636cd4446a6178e06e314a26a, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/242923957ab64caab0c8b69ccfabd882, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60342431768a41e0bf687ff47bf80916, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8568bf96f3134f09bda505ec32f45743, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f77ce9d5a6444c6c8aaf274026df991a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f17d4f55b71d43088ddd88cadbda49a9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/a17e3c431ea1433b9fffcc0b09ba71c9, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/1330d2f8fbe645b8bce74268e6ea3e88, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/dee0991e8a094c469a23ea308395ebba, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/9d618c64efdf467c811c3da1598d614c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/cde20e14a377426ab91d710627e098b1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f451f395f40e423e8ad83109e9ed4070, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0a374d6c3ed746c9b466967e3a47202c] to archive 2024-12-10T03:35:55,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:35:55,167 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/7e4711e511bc4f48a97f6b7fe5fd06d0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/7e4711e511bc4f48a97f6b7fe5fd06d0 2024-12-10T03:35:55,167 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/459f669715714e60ba888ba80805f8c7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/459f669715714e60ba888ba80805f8c7 2024-12-10T03:35:55,167 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/eb78970f5c4743feb996f33770806929 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/eb78970f5c4743feb996f33770806929 2024-12-10T03:35:55,167 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/242923957ab64caab0c8b69ccfabd882 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/242923957ab64caab0c8b69ccfabd882 2024-12-10T03:35:55,167 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60342431768a41e0bf687ff47bf80916 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/60342431768a41e0bf687ff47bf80916 2024-12-10T03:35:55,167 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/3cd6433654484322ad7a07351fb3e8f0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/3cd6433654484322ad7a07351fb3e8f0 2024-12-10T03:35:55,167 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8568bf96f3134f09bda505ec32f45743 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/8568bf96f3134f09bda505ec32f45743 2024-12-10T03:35:55,167 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/52e9888636cd4446a6178e06e314a26a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/52e9888636cd4446a6178e06e314a26a 2024-12-10T03:35:55,168 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/a17e3c431ea1433b9fffcc0b09ba71c9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/a17e3c431ea1433b9fffcc0b09ba71c9 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f17d4f55b71d43088ddd88cadbda49a9 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f17d4f55b71d43088ddd88cadbda49a9 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f77ce9d5a6444c6c8aaf274026df991a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f77ce9d5a6444c6c8aaf274026df991a 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/dee0991e8a094c469a23ea308395ebba to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/dee0991e8a094c469a23ea308395ebba 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/1330d2f8fbe645b8bce74268e6ea3e88 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/1330d2f8fbe645b8bce74268e6ea3e88 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/cde20e14a377426ab91d710627e098b1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/cde20e14a377426ab91d710627e098b1 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/9d618c64efdf467c811c3da1598d614c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/9d618c64efdf467c811c3da1598d614c 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f451f395f40e423e8ad83109e9ed4070 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/f451f395f40e423e8ad83109e9ed4070 2024-12-10T03:35:55,169 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0a374d6c3ed746c9b466967e3a47202c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/0a374d6c3ed746c9b466967e3a47202c 2024-12-10T03:35:55,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/249464a1e27a416a9fd13851bc598e3b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8d292c594c694bab9cb9ca84342db432, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a4e3f4f8098f4375b8de1386c227ce54, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7af951a10bc84bdba76566a5f5c01c4f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/89eabd3380b4442e9b7f641fe129d655, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2db5e9a81e534f16a4d02c4f9fce1d7f, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fe8e9b42562e46c69171a2d36240b134, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7c3630a4cb7d475082ec144ddcd5d6be, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5c35a93bb0214ba9ac39197371dafa20, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a485f8463e684014ada960fe7fee3f26, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/b7b21e8f533d46f8afd8ac93d5a7c30c, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fcedbdff77604eb58cddd1fb5d7b7828, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/81715d4ff94a42d394c184020533ceae, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e176a748372a4d7699b56dd49c0fa3bb, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a284ab9f0acb47c0a302f7ef0c3aba2d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e8f4824a7bcd4c24977b6e0990c6c2be, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/ac0daf91dd70482e9a7561b2041f305c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/d14004a88c3b421a834e720e62db4027, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a87991db1011437dbdd7e283354f1caf, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/38c8752b46874263ae748a6be7593267, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2bf31698ecd7494982cf3d21aa4bf29a] to archive 2024-12-10T03:35:55,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:35:55,175 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7af951a10bc84bdba76566a5f5c01c4f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7af951a10bc84bdba76566a5f5c01c4f 2024-12-10T03:35:55,175 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2db5e9a81e534f16a4d02c4f9fce1d7f to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2db5e9a81e534f16a4d02c4f9fce1d7f 2024-12-10T03:35:55,175 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/249464a1e27a416a9fd13851bc598e3b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/249464a1e27a416a9fd13851bc598e3b 2024-12-10T03:35:55,175 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/89eabd3380b4442e9b7f641fe129d655 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/89eabd3380b4442e9b7f641fe129d655 2024-12-10T03:35:55,175 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fe8e9b42562e46c69171a2d36240b134 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fe8e9b42562e46c69171a2d36240b134 2024-12-10T03:35:55,175 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8d292c594c694bab9cb9ca84342db432 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/8d292c594c694bab9cb9ca84342db432 2024-12-10T03:35:55,175 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7c3630a4cb7d475082ec144ddcd5d6be to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/7c3630a4cb7d475082ec144ddcd5d6be 2024-12-10T03:35:55,175 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a4e3f4f8098f4375b8de1386c227ce54 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a4e3f4f8098f4375b8de1386c227ce54 2024-12-10T03:35:55,176 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5c35a93bb0214ba9ac39197371dafa20 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5c35a93bb0214ba9ac39197371dafa20 2024-12-10T03:35:55,176 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/b7b21e8f533d46f8afd8ac93d5a7c30c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/b7b21e8f533d46f8afd8ac93d5a7c30c 2024-12-10T03:35:55,176 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fcedbdff77604eb58cddd1fb5d7b7828 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/fcedbdff77604eb58cddd1fb5d7b7828 2024-12-10T03:35:55,176 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a485f8463e684014ada960fe7fee3f26 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a485f8463e684014ada960fe7fee3f26 2024-12-10T03:35:55,176 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/81715d4ff94a42d394c184020533ceae to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/81715d4ff94a42d394c184020533ceae 2024-12-10T03:35:55,176 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a284ab9f0acb47c0a302f7ef0c3aba2d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a284ab9f0acb47c0a302f7ef0c3aba2d 2024-12-10T03:35:55,176 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e176a748372a4d7699b56dd49c0fa3bb to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e176a748372a4d7699b56dd49c0fa3bb 2024-12-10T03:35:55,177 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e8f4824a7bcd4c24977b6e0990c6c2be to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/e8f4824a7bcd4c24977b6e0990c6c2be 2024-12-10T03:35:55,177 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/ac0daf91dd70482e9a7561b2041f305c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/ac0daf91dd70482e9a7561b2041f305c 2024-12-10T03:35:55,177 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/d14004a88c3b421a834e720e62db4027 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/d14004a88c3b421a834e720e62db4027 2024-12-10T03:35:55,177 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a87991db1011437dbdd7e283354f1caf to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/a87991db1011437dbdd7e283354f1caf 2024-12-10T03:35:55,177 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/38c8752b46874263ae748a6be7593267 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/38c8752b46874263ae748a6be7593267 2024-12-10T03:35:55,177 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2bf31698ecd7494982cf3d21aa4bf29a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/2bf31698ecd7494982cf3d21aa4bf29a 2024-12-10T03:35:55,180 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/recovered.edits/368.seqid, newMaxSeqId=368, maxSeqId=1 2024-12-10T03:35:55,180 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947. 2024-12-10T03:35:55,181 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] regionserver.HRegion(1635): Region close journal for b427273a9970e4bd4ba7fd36e4602947: 2024-12-10T03:35:55,182 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=141}] handler.UnassignRegionHandler(170): Closed b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:55,182 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=140 updating hbase:meta row=b427273a9970e4bd4ba7fd36e4602947, regionState=CLOSED 2024-12-10T03:35:55,183 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-10T03:35:55,183 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseRegionProcedure b427273a9970e4bd4ba7fd36e4602947, server=50b9ef1c5472,37553,1733801610862 in 1.4500 sec 2024-12-10T03:35:55,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=139 2024-12-10T03:35:55,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=139, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b427273a9970e4bd4ba7fd36e4602947, UNASSIGN in 1.4550 sec 2024-12-10T03:35:55,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-10T03:35:55,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4580 sec 2024-12-10T03:35:55,186 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801755186"}]},"ts":"1733801755186"} 2024-12-10T03:35:55,186 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T03:35:55,198 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T03:35:55,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5250 sec 2024-12-10T03:35:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T03:35:55,786 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-10T03:35:55,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T03:35:55,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:55,791 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=142, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:55,793 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=142, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T03:35:55,797 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:55,802 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/recovered.edits] 2024-12-10T03:35:55,805 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/2151d10ca3354eb88b92ce60440f5f9d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/2151d10ca3354eb88b92ce60440f5f9d 2024-12-10T03:35:55,805 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/18a5e38ba1784bb1a3044242e41d78b1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/A/18a5e38ba1784bb1a3044242e41d78b1 2024-12-10T03:35:55,808 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/e3cd875e1c6f42889046957544bfe068 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/e3cd875e1c6f42889046957544bfe068 2024-12-10T03:35:55,808 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/42eee792f4a24ecab16269c4aa88198d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/B/42eee792f4a24ecab16269c4aa88198d 2024-12-10T03:35:55,811 DEBUG [HFileArchiver-1 {}] 
backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5ce4320757c04f46b1e8aab59fd92dd5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/5ce4320757c04f46b1e8aab59fd92dd5 2024-12-10T03:35:55,811 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/154317ff696c4828a8e3e414b3eb2293 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/C/154317ff696c4828a8e3e414b3eb2293 2024-12-10T03:35:55,814 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/recovered.edits/368.seqid to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947/recovered.edits/368.seqid 2024-12-10T03:35:55,815 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/b427273a9970e4bd4ba7fd36e4602947 2024-12-10T03:35:55,815 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T03:35:55,818 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=142, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:55,820 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T03:35:55,822 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T03:35:55,823 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=142, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:55,823 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T03:35:55,823 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733801755823"}]},"ts":"9223372036854775807"} 2024-12-10T03:35:55,825 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T03:35:55,825 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b427273a9970e4bd4ba7fd36e4602947, NAME => 'TestAcidGuarantees,,1733801724285.b427273a9970e4bd4ba7fd36e4602947.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T03:35:55,825 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
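The sequence recorded above, DISABLE completing as procId 138, the delete request stored as DeleteTableProcedure pid=142, the region directory archived, and the table's rows removed from hbase:meta, is what the master runs when a client disables and then drops the table. A minimal client-side sketch that would drive the same two procedures through the Admin API is below; connection setup is omitted and only the table name is taken from this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DropTableSketch {
  static void dropTable(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        admin.disableTable(tn);  // master runs DisableTableProcedure (UNASSIGN regions, mark DISABLED in meta)
        admin.deleteTable(tn);   // master runs DeleteTableProcedure (archive FS layout, clean up meta)
      }
    }
  }
}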
2024-12-10T03:35:55,826 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733801755825"}]},"ts":"9223372036854775807"} 2024-12-10T03:35:55,827 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T03:35:55,874 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=142, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:55,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 86 msec 2024-12-10T03:35:55,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T03:35:55,894 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-10T03:35:55,906 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 245), OpenFileDescriptor=453 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=295 (was 268) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3287 (was 3300) 2024-12-10T03:35:55,914 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=295, ProcessCount=11, AvailableMemoryMB=3286 2024-12-10T03:35:55,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-10T03:35:55,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:35:55,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:55,917 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T03:35:55,917 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:55,917 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 143 2024-12-10T03:35:55,918 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T03:35:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-10T03:35:55,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742296_1472 (size=963) 2024-12-10T03:35:56,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-10T03:35:56,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-10T03:35:56,329 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a 2024-12-10T03:35:56,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742297_1473 (size=53) 2024-12-10T03:35:56,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-10T03:35:56,739 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:35:56,740 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 177cb6154ba45b2806835557c21cb1d3, disabling compactions & flushes 2024-12-10T03:35:56,740 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:56,740 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:56,740 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. after waiting 0 ms 2024-12-10T03:35:56,740 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:56,740 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
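The CreateTableProcedure above was triggered by a create request for 'TestAcidGuarantees' with three column families (A, B, C, each keeping a single version) and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why the stores later come up as CompactingMemStore with compactor=ADAPTIVE. Below is a hedged sketch of building and submitting an equivalent descriptor through the Admin API; only the attributes visible in this log are set, everything else is left at defaults.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createTable(Admin admin) throws Exception {
    TableDescriptorBuilder b = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // table-level metadata seen in the log; selects the adaptive in-memory compaction policy
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      b.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
          .build());
    }
    TableDescriptor desc = b.build();
    admin.createTable(desc);   // master stores a CreateTableProcedure like pid=143 above
  }
}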
2024-12-10T03:35:56,740 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:35:56,742 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T03:35:56,743 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733801756742"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733801756742"}]},"ts":"1733801756742"} 2024-12-10T03:35:56,745 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T03:35:56,747 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T03:35:56,748 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801756747"}]},"ts":"1733801756747"} 2024-12-10T03:35:56,749 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T03:35:56,801 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, ASSIGN}] 2024-12-10T03:35:56,802 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, ASSIGN 2024-12-10T03:35:56,803 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, ASSIGN; state=OFFLINE, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=false 2024-12-10T03:35:56,954 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:56,955 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; OpenRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:35:57,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-10T03:35:57,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:57,113 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:35:57,113 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7285): Opening region: {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:35:57,114 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,114 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:35:57,114 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7327): checking encryption for 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,114 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7330): checking classloading for 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,116 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,118 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:57,118 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 177cb6154ba45b2806835557c21cb1d3 columnFamilyName A 2024-12-10T03:35:57,119 DEBUG [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:57,119 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(327): Store=177cb6154ba45b2806835557c21cb1d3/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:57,120 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,121 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:57,121 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 177cb6154ba45b2806835557c21cb1d3 columnFamilyName B 2024-12-10T03:35:57,122 DEBUG [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:57,122 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(327): Store=177cb6154ba45b2806835557c21cb1d3/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:57,122 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,123 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:57,123 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 177cb6154ba45b2806835557c21cb1d3 columnFamilyName C 2024-12-10T03:35:57,124 DEBUG [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:57,124 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(327): Store=177cb6154ba45b2806835557c21cb1d3/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:57,124 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:57,125 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,125 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,126 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:35:57,128 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1085): writing seq id for 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:57,129 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T03:35:57,130 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1102): Opened 177cb6154ba45b2806835557c21cb1d3; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62672423, jitterRate=-0.06610812246799469}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:35:57,130 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1001): Region open journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:35:57,131 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., pid=145, masterSystemTime=1733801757108 2024-12-10T03:35:57,132 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:57,132 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
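Note: the three stores opened above (A, B, C) come up as CompactingMemStore instances with the ADAPTIVE in-memory compaction policy, which matches the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that appears in the descriptor later in this log. As a minimal sketch only (this code is not taken from the test; the table and family names are reused from the log purely for illustration), such a table could be declared through the HBase 2.x client API roughly like this:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveMemstoreTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level attribute, spelled exactly as in the logged descriptor.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family)).build());
      }
      admin.createTable(builder.build());
    }
  }
}

The policy can also be set per family via ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE); the table-level attribute shown here simply applies it to all families, as the logged descriptor does.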
2024-12-10T03:35:57,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=OPEN, openSeqNum=2, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:57,134 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-10T03:35:57,134 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; OpenRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 in 178 msec 2024-12-10T03:35:57,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=144, resume processing ppid=143 2024-12-10T03:35:57,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, ppid=143, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, ASSIGN in 334 msec 2024-12-10T03:35:57,136 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T03:35:57,136 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801757136"}]},"ts":"1733801757136"} 2024-12-10T03:35:57,137 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T03:35:57,148 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T03:35:57,149 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2320 sec 2024-12-10T03:35:58,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-10T03:35:58,028 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 143 completed 2024-12-10T03:35:58,031 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-12-10T03:35:58,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:58,094 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:58,095 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52750, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:58,096 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T03:35:58,097 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48270, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T03:35:58,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T03:35:58,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T03:35:58,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T03:35:58,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742298_1474 (size=999) 2024-12-10T03:35:58,515 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T03:35:58,515 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T03:35:58,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:35:58,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, REOPEN/MOVE}] 2024-12-10T03:35:58,524 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, REOPEN/MOVE 2024-12-10T03:35:58,524 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:58,525 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:35:58,525 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; CloseRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:35:58,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:58,678 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(124): Close 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:58,678 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:35:58,678 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1681): Closing 177cb6154ba45b2806835557c21cb1d3, disabling compactions & flushes 2024-12-10T03:35:58,678 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:58,678 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:58,679 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. after waiting 0 ms 2024-12-10T03:35:58,679 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:35:58,687 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T03:35:58,688 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:58,688 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1635): Region close journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:35:58,688 WARN [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionServer(3786): Not adding moved region record: 177cb6154ba45b2806835557c21cb1d3 to self. 2024-12-10T03:35:58,689 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(170): Closed 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:58,690 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=CLOSED 2024-12-10T03:35:58,692 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-10T03:35:58,692 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 in 165 msec 2024-12-10T03:35:58,692 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, REOPEN/MOVE; state=CLOSED, location=50b9ef1c5472,37553,1733801610862; forceNewPlan=false, retain=true 2024-12-10T03:35:58,843 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=OPENING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:58,846 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=148, state=RUNNABLE; OpenRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:35:58,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,006 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
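Note: the modify-table request logged above changes only column family A, marking it MOB-enabled (IS_MOB => 'true') with MOB_THRESHOLD => '4' so that values larger than 4 bytes are written out as separate MOB files; the ReopenTableRegionsProcedure that follows is the region reopen this descriptor change requires. As an illustration only (not code from this test run), an equivalent change could be issued through the HBase 2.x Admin API along these lines:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Start from the current descriptor so families B and C stay untouched.
      TableDescriptor current = admin.getDescriptor(table);
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
              .build())
          .build();
      // Like the ModifyTableProcedure in the log, this forces the table's regions to reopen.
      admin.modifyTable(modified);
    }
  }
}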
2024-12-10T03:35:59,006 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} 2024-12-10T03:35:59,007 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,007 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T03:35:59,007 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,007 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,010 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,011 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:59,011 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 177cb6154ba45b2806835557c21cb1d3 columnFamilyName A 2024-12-10T03:35:59,012 DEBUG [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:59,013 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(327): Store=177cb6154ba45b2806835557c21cb1d3/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:59,013 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,014 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:59,014 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 177cb6154ba45b2806835557c21cb1d3 columnFamilyName B 2024-12-10T03:35:59,014 DEBUG [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:59,015 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(327): Store=177cb6154ba45b2806835557c21cb1d3/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:59,015 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,016 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T03:35:59,016 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 177cb6154ba45b2806835557c21cb1d3 columnFamilyName C 2024-12-10T03:35:59,016 DEBUG [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:59,017 INFO [StoreOpener-177cb6154ba45b2806835557c21cb1d3-1 {}] regionserver.HStore(327): Store=177cb6154ba45b2806835557c21cb1d3/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T03:35:59,017 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,018 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,019 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,021 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T03:35:59,023 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,024 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 177cb6154ba45b2806835557c21cb1d3; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68154364, jitterRate=0.015579164028167725}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T03:35:59,025 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:35:59,026 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., pid=150, masterSystemTime=1733801758999 2024-12-10T03:35:59,027 DEBUG [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,027 INFO [RS_OPEN_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:35:59,028 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=OPEN, openSeqNum=5, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,031 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=148 2024-12-10T03:35:59,031 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=148, state=SUCCESS; OpenRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 in 184 msec 2024-12-10T03:35:59,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-10T03:35:59,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, REOPEN/MOVE in 508 msec 2024-12-10T03:35:59,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-10T03:35:59,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-12-10T03:35:59,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 936 msec 2024-12-10T03:35:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-10T03:35:59,040 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-12-10T03:35:59,083 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,085 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-12-10T03:35:59,098 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,099 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-12-10T03:35:59,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,110 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a 
to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-12-10T03:35:59,117 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,118 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-12-10T03:35:59,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,126 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-12-10T03:35:59,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,135 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-12-10T03:35:59,142 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,143 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-12-10T03:35:59,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,151 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-12-10T03:35:59,159 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,159 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:51621 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-12-10T03:35:59,167 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T03:35:59,170 DEBUG [hconnection-0x21fdeec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,170 DEBUG [hconnection-0x4daf417f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:35:59,171 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52754, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,171 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees 2024-12-10T03:35:59,171 DEBUG [hconnection-0x1dbc0d4b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,171 DEBUG [hconnection-0x5680e00d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,172 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:35:59,172 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-10T03:35:59,173 DEBUG [hconnection-0x2f349621-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,173 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,173 DEBUG [hconnection-0x4acb31b2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,173 DEBUG [hconnection-0x5a9d300d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,173 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): 
pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:35:59,173 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,174 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:35:59,174 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,174 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,175 DEBUG [hconnection-0xd567956-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,175 DEBUG [hconnection-0x39938f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,175 DEBUG [hconnection-0x4a3a7b34-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T03:35:59,176 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,176 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,176 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T03:35:59,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,180 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T03:35:59,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:35:59,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:59,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:35:59,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:59,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:35:59,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:35:59,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210254ff424b63f4861a62d5b237a5e9ac2_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801759179/Put/seqid=0 2024-12-10T03:35:59,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801819198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801819201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801819200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801819201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742299_1475 (size=12154) 2024-12-10T03:35:59,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801819207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,209 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:35:59,211 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210254ff424b63f4861a62d5b237a5e9ac2_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210254ff424b63f4861a62d5b237a5e9ac2_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:35:59,212 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8a07313a0c97400f9b6c6e01ab26f2e3, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:35:59,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8a07313a0c97400f9b6c6e01ab26f2e3 is 175, key is test_row_0/A:col10/1733801759179/Put/seqid=0 2024-12-10T03:35:59,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742300_1476 (size=30955) 2024-12-10T03:35:59,216 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8a07313a0c97400f9b6c6e01ab26f2e3 2024-12-10T03:35:59,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/aaed304551854108bc98c77e32d6f726 is 50, key is test_row_0/B:col10/1733801759179/Put/seqid=0 2024-12-10T03:35:59,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742301_1477 
(size=12001) 2024-12-10T03:35:59,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-10T03:35:59,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801819302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801819304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801819304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801819304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801819309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,325 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-10T03:35:59,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:35:59,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:59,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,373 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T03:35:59,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-10T03:35:59,477 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-10T03:35:59,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:35:59,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801819504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801819506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801819506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801819506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801819512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,629 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-10T03:35:59,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:35:59,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:59,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/aaed304551854108bc98c77e32d6f726 2024-12-10T03:35:59,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/879a158e96e1447aa204dfe1f4a366fe is 50, key is test_row_0/C:col10/1733801759179/Put/seqid=0 2024-12-10T03:35:59,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742302_1478 (size=12001) 2024-12-10T03:35:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-10T03:35:59,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-10T03:35:59,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:35:59,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801819807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801819808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801819808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801819810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:35:59,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801819813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:35:59,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-10T03:35:59,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:35:59,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:35:59,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:35:59,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:35:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:00,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/879a158e96e1447aa204dfe1f4a366fe 2024-12-10T03:36:00,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8a07313a0c97400f9b6c6e01ab26f2e3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8a07313a0c97400f9b6c6e01ab26f2e3 2024-12-10T03:36:00,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8a07313a0c97400f9b6c6e01ab26f2e3, entries=150, sequenceid=16, filesize=30.2 K 2024-12-10T03:36:00,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/aaed304551854108bc98c77e32d6f726 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/aaed304551854108bc98c77e32d6f726 2024-12-10T03:36:00,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/aaed304551854108bc98c77e32d6f726, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T03:36:00,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/879a158e96e1447aa204dfe1f4a366fe as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/879a158e96e1447aa204dfe1f4a366fe 2024-12-10T03:36:00,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/879a158e96e1447aa204dfe1f4a366fe, entries=150, sequenceid=16, filesize=11.7 K 2024-12-10T03:36:00,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 177cb6154ba45b2806835557c21cb1d3 in 901ms, sequenceid=16, compaction requested=false 2024-12-10T03:36:00,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:00,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 
2024-12-10T03:36:00,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-10T03:36:00,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:00,087 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:36:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:00,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:00,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101049713dda494ff1983d1c5d4f91b256_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801759200/Put/seqid=0 2024-12-10T03:36:00,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742303_1479 (size=12154) 2024-12-10T03:36:00,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:00,100 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101049713dda494ff1983d1c5d4f91b256_177cb6154ba45b2806835557c21cb1d3 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101049713dda494ff1983d1c5d4f91b256_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:00,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8edce30291014bbe83bfc439b618fda5, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:00,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8edce30291014bbe83bfc439b618fda5 is 175, key is test_row_0/A:col10/1733801759200/Put/seqid=0 2024-12-10T03:36:00,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742304_1480 (size=30955) 2024-12-10T03:36:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-10T03:36:00,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:00,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:00,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801820314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801820314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801820315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801820315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801820315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801820416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801820417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801820417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801820417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,504 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8edce30291014bbe83bfc439b618fda5 2024-12-10T03:36:00,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/21735d3c0bac43508c7b761dda1d1ca4 is 50, key is test_row_0/B:col10/1733801759200/Put/seqid=0 2024-12-10T03:36:00,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742305_1481 (size=12001) 2024-12-10T03:36:00,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801820617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801820620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801820620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801820620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,695 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T03:36:00,913 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/21735d3c0bac43508c7b761dda1d1ca4 2024-12-10T03:36:00,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/9ca35b3596ba4c1e99e9cc45a2a25ffe is 50, key is test_row_0/C:col10/1733801759200/Put/seqid=0 2024-12-10T03:36:00,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801820919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742306_1482 (size=12001) 2024-12-10T03:36:00,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801820923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801820923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:00,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:00,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801820923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-10T03:36:01,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801821317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,321 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/9ca35b3596ba4c1e99e9cc45a2a25ffe 2024-12-10T03:36:01,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/8edce30291014bbe83bfc439b618fda5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8edce30291014bbe83bfc439b618fda5 2024-12-10T03:36:01,326 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8edce30291014bbe83bfc439b618fda5, entries=150, sequenceid=41, filesize=30.2 K 2024-12-10T03:36:01,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/21735d3c0bac43508c7b761dda1d1ca4 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/21735d3c0bac43508c7b761dda1d1ca4 2024-12-10T03:36:01,329 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/21735d3c0bac43508c7b761dda1d1ca4, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T03:36:01,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/9ca35b3596ba4c1e99e9cc45a2a25ffe as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9ca35b3596ba4c1e99e9cc45a2a25ffe 2024-12-10T03:36:01,332 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9ca35b3596ba4c1e99e9cc45a2a25ffe, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T03:36:01,333 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 177cb6154ba45b2806835557c21cb1d3 in 1247ms, sequenceid=41, compaction requested=false 2024-12-10T03:36:01,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:01,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:01,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-12-10T03:36:01,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-12-10T03:36:01,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-10T03:36:01,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1610 sec 2024-12-10T03:36:01,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 2.1660 sec 2024-12-10T03:36:01,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:01,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T03:36:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:01,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:01,426 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:01,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121062f62e397a1b4a37bbda029af246da31_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801760314/Put/seqid=0 2024-12-10T03:36:01,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742307_1483 (size=14594) 2024-12-10T03:36:01,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801821440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801821441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801821441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801821441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801821543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801821544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801821544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801821544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801821746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801821746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801821747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:01,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801821747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:01,838 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:01,841 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121062f62e397a1b4a37bbda029af246da31_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121062f62e397a1b4a37bbda029af246da31_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:01,841 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c557a57b9b284678a0c91bf3cbe09605, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:01,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c557a57b9b284678a0c91bf3cbe09605 is 175, key is test_row_0/A:col10/1733801760314/Put/seqid=0 2024-12-10T03:36:01,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742308_1484 (size=39549) 2024-12-10T03:36:01,845 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c557a57b9b284678a0c91bf3cbe09605 2024-12-10T03:36:01,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/96376090310947fe9f10bfdac16b1958 is 50, key is test_row_0/B:col10/1733801760314/Put/seqid=0 2024-12-10T03:36:01,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742309_1485 
(size=12001) 2024-12-10T03:36:02,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801822048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801822049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801822049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801822051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/96376090310947fe9f10bfdac16b1958 2024-12-10T03:36:02,259 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/9c7fa86718094911bd394509d601cefe is 50, key is test_row_0/C:col10/1733801760314/Put/seqid=0 2024-12-10T03:36:02,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742310_1486 (size=12001) 2024-12-10T03:36:02,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801822552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801822553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801822554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:02,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801822557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:02,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/9c7fa86718094911bd394509d601cefe 2024-12-10T03:36:02,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c557a57b9b284678a0c91bf3cbe09605 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c557a57b9b284678a0c91bf3cbe09605 2024-12-10T03:36:02,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c557a57b9b284678a0c91bf3cbe09605, entries=200, sequenceid=53, filesize=38.6 K 2024-12-10T03:36:02,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/96376090310947fe9f10bfdac16b1958 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/96376090310947fe9f10bfdac16b1958 2024-12-10T03:36:02,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/96376090310947fe9f10bfdac16b1958, entries=150, sequenceid=53, filesize=11.7 K 2024-12-10T03:36:02,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/9c7fa86718094911bd394509d601cefe as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9c7fa86718094911bd394509d601cefe 2024-12-10T03:36:02,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9c7fa86718094911bd394509d601cefe, entries=150, sequenceid=53, filesize=11.7 K 2024-12-10T03:36:02,675 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 177cb6154ba45b2806835557c21cb1d3 in 1250ms, sequenceid=53, compaction requested=true 2024-12-10T03:36:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:36:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:36:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:02,675 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:02,675 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:36:02,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:02,676 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:02,676 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:02,676 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/A is initiating minor compaction (all files) 2024-12-10T03:36:02,676 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/B is initiating minor compaction (all files) 2024-12-10T03:36:02,676 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/A in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:02,676 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/B in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:02,676 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8a07313a0c97400f9b6c6e01ab26f2e3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8edce30291014bbe83bfc439b618fda5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c557a57b9b284678a0c91bf3cbe09605] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=99.1 K 2024-12-10T03:36:02,676 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/aaed304551854108bc98c77e32d6f726, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/21735d3c0bac43508c7b761dda1d1ca4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/96376090310947fe9f10bfdac16b1958] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=35.2 K 2024-12-10T03:36:02,676 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:02,676 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8a07313a0c97400f9b6c6e01ab26f2e3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8edce30291014bbe83bfc439b618fda5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c557a57b9b284678a0c91bf3cbe09605] 2024-12-10T03:36:02,676 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a07313a0c97400f9b6c6e01ab26f2e3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801759178 2024-12-10T03:36:02,676 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting aaed304551854108bc98c77e32d6f726, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801759178 2024-12-10T03:36:02,677 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8edce30291014bbe83bfc439b618fda5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733801759196 2024-12-10T03:36:02,677 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 21735d3c0bac43508c7b761dda1d1ca4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733801759196 2024-12-10T03:36:02,677 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 96376090310947fe9f10bfdac16b1958, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801760312 2024-12-10T03:36:02,677 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting c557a57b9b284678a0c91bf3cbe09605, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801760312 2024-12-10T03:36:02,680 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:02,682 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#B#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:02,682 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210384dd22a5ab84edbba82cd982a21ea79_177cb6154ba45b2806835557c21cb1d3 store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:02,682 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/5e3f90414cea4008afb8c0da826c01eb is 50, key is test_row_0/B:col10/1733801760314/Put/seqid=0 2024-12-10T03:36:02,683 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210384dd22a5ab84edbba82cd982a21ea79_177cb6154ba45b2806835557c21cb1d3, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:02,683 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210384dd22a5ab84edbba82cd982a21ea79_177cb6154ba45b2806835557c21cb1d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742312_1488 (size=4469) 2024-12-10T03:36:02,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742311_1487 (size=12104) 2024-12-10T03:36:03,087 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#A#compaction#414 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:03,088 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/f7acc157b40d4f118375ff1add3a8566 is 175, key is test_row_0/A:col10/1733801760314/Put/seqid=0 2024-12-10T03:36:03,090 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/5e3f90414cea4008afb8c0da826c01eb as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/5e3f90414cea4008afb8c0da826c01eb 2024-12-10T03:36:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742313_1489 (size=31058) 2024-12-10T03:36:03,094 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/B of 177cb6154ba45b2806835557c21cb1d3 into 5e3f90414cea4008afb8c0da826c01eb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:36:03,094 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:03,094 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/B, priority=13, startTime=1733801762675; duration=0sec 2024-12-10T03:36:03,094 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:03,094 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:B 2024-12-10T03:36:03,094 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:03,095 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:03,095 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/C is initiating minor compaction (all files) 2024-12-10T03:36:03,095 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/C in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:03,095 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/879a158e96e1447aa204dfe1f4a366fe, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9ca35b3596ba4c1e99e9cc45a2a25ffe, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9c7fa86718094911bd394509d601cefe] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=35.2 K 2024-12-10T03:36:03,095 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 879a158e96e1447aa204dfe1f4a366fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733801759178 2024-12-10T03:36:03,095 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ca35b3596ba4c1e99e9cc45a2a25ffe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733801759196 2024-12-10T03:36:03,095 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c7fa86718094911bd394509d601cefe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801760312 2024-12-10T03:36:03,109 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#C#compaction#416 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:03,109 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/b220544c2bb7499bbc6c9c3c5a3659ac is 50, key is test_row_0/C:col10/1733801760314/Put/seqid=0 2024-12-10T03:36:03,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742314_1490 (size=12104) 2024-12-10T03:36:03,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-10T03:36:03,276 INFO [Thread-2140 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-12-10T03:36:03,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:36:03,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-10T03:36:03,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T03:36:03,278 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:36:03,279 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:36:03,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:36:03,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:03,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:36:03,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:03,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:03,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:03,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:03,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:03,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-10T03:36:03,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108617ef0e877f450290fa96d188d029f3_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801761437/Put/seqid=0 2024-12-10T03:36:03,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801823340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742315_1491 (size=12154) 2024-12-10T03:36:03,349 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:03,352 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108617ef0e877f450290fa96d188d029f3_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108617ef0e877f450290fa96d188d029f3_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:03,352 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/a456b0c864ba48e4a865292956cf695c, store: 
[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:03,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/a456b0c864ba48e4a865292956cf695c is 175, key is test_row_0/A:col10/1733801761437/Put/seqid=0 2024-12-10T03:36:03,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742316_1492 (size=30955) 2024-12-10T03:36:03,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T03:36:03,430 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T03:36:03,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:03,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:03,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:03,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:03,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:03,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:03,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801823442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,495 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/f7acc157b40d4f118375ff1add3a8566 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/f7acc157b40d4f118375ff1add3a8566 2024-12-10T03:36:03,499 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/A of 177cb6154ba45b2806835557c21cb1d3 into f7acc157b40d4f118375ff1add3a8566(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:36:03,499 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:03,499 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/A, priority=13, startTime=1733801762675; duration=0sec 2024-12-10T03:36:03,499 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:03,499 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:A 2024-12-10T03:36:03,544 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/b220544c2bb7499bbc6c9c3c5a3659ac as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b220544c2bb7499bbc6c9c3c5a3659ac 2024-12-10T03:36:03,548 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/C of 177cb6154ba45b2806835557c21cb1d3 into b220544c2bb7499bbc6c9c3c5a3659ac(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:36:03,548 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:03,548 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/C, priority=13, startTime=1733801762675; duration=0sec 2024-12-10T03:36:03,548 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:03,548 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:C 2024-12-10T03:36:03,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801823555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801823555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801823560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801823565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T03:36:03,582 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T03:36:03,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:03,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:03,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:03,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:03,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:03,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:03,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801823646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,734 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-10T03:36:03,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:03,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:03,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:03,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:03,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:03,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:03,757 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/a456b0c864ba48e4a865292956cf695c 2024-12-10T03:36:03,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/2671cc2250014defb97c158f3c9bbe7c is 50, key is test_row_0/B:col10/1733801761437/Put/seqid=0 2024-12-10T03:36:03,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742317_1493 (size=12001) 2024-12-10T03:36:03,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/2671cc2250014defb97c158f3c9bbe7c 2024-12-10T03:36:03,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/d34a1217c262468c8922e34ea8db4c65 is 50, key is test_row_0/C:col10/1733801761437/Put/seqid=0 2024-12-10T03:36:03,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742318_1494 (size=12001) 2024-12-10T03:36:03,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/d34a1217c262468c8922e34ea8db4c65 2024-12-10T03:36:03,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/a456b0c864ba48e4a865292956cf695c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/a456b0c864ba48e4a865292956cf695c 2024-12-10T03:36:03,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/a456b0c864ba48e4a865292956cf695c, entries=150, sequenceid=79, filesize=30.2 K 2024-12-10T03:36:03,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/2671cc2250014defb97c158f3c9bbe7c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2671cc2250014defb97c158f3c9bbe7c 2024-12-10T03:36:03,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2671cc2250014defb97c158f3c9bbe7c, entries=150, sequenceid=79, filesize=11.7 K 2024-12-10T03:36:03,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/d34a1217c262468c8922e34ea8db4c65 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d34a1217c262468c8922e34ea8db4c65 2024-12-10T03:36:03,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d34a1217c262468c8922e34ea8db4c65, entries=150, sequenceid=79, filesize=11.7 K 2024-12-10T03:36:03,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 177cb6154ba45b2806835557c21cb1d3 in 468ms, sequenceid=79, compaction requested=false 2024-12-10T03:36:03,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:03,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T03:36:03,886 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:03,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=154 2024-12-10T03:36:03,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:03,887 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:36:03,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:03,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:03,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:03,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:03,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:03,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:03,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e112d116c4bd4bbd9cc5a1ad677e4560_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801763339/Put/seqid=0 2024-12-10T03:36:03,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742319_1495 (size=12154) 2024-12-10T03:36:03,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:03,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:03,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:03,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801823993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:04,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:04,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801824095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:04,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:04,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801824297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:04,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:04,308 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e112d116c4bd4bbd9cc5a1ad677e4560_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e112d116c4bd4bbd9cc5a1ad677e4560_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:04,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bcb4ec106ffa4a79a1d2e2e23023ef20, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:04,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bcb4ec106ffa4a79a1d2e2e23023ef20 is 175, key is test_row_0/A:col10/1733801763339/Put/seqid=0 2024-12-10T03:36:04,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742320_1496 (size=30955) 2024-12-10T03:36:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T03:36:04,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:04,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801824599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:04,712 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bcb4ec106ffa4a79a1d2e2e23023ef20 2024-12-10T03:36:04,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/53eec2f4d55945a2a497c5c55f9d40c0 is 50, key is test_row_0/B:col10/1733801763339/Put/seqid=0 2024-12-10T03:36:04,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742321_1497 (size=12001) 2024-12-10T03:36:05,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801825101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,128 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/53eec2f4d55945a2a497c5c55f9d40c0 2024-12-10T03:36:05,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/c8b3f44878964112b4f4abcd9bc9a6ee is 50, key is test_row_0/C:col10/1733801763339/Put/seqid=0 2024-12-10T03:36:05,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742322_1498 (size=12001) 2024-12-10T03:36:05,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T03:36:05,537 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/c8b3f44878964112b4f4abcd9bc9a6ee 2024-12-10T03:36:05,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bcb4ec106ffa4a79a1d2e2e23023ef20 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bcb4ec106ffa4a79a1d2e2e23023ef20 2024-12-10T03:36:05,543 INFO 
[RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bcb4ec106ffa4a79a1d2e2e23023ef20, entries=150, sequenceid=92, filesize=30.2 K 2024-12-10T03:36:05,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/53eec2f4d55945a2a497c5c55f9d40c0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/53eec2f4d55945a2a497c5c55f9d40c0 2024-12-10T03:36:05,546 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/53eec2f4d55945a2a497c5c55f9d40c0, entries=150, sequenceid=92, filesize=11.7 K 2024-12-10T03:36:05,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/c8b3f44878964112b4f4abcd9bc9a6ee as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c8b3f44878964112b4f4abcd9bc9a6ee 2024-12-10T03:36:05,548 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c8b3f44878964112b4f4abcd9bc9a6ee, entries=150, sequenceid=92, filesize=11.7 K 2024-12-10T03:36:05,549 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 177cb6154ba45b2806835557c21cb1d3 in 1662ms, sequenceid=92, compaction requested=true 2024-12-10T03:36:05,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:05,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:05,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-10T03:36:05,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-10T03:36:05,551 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-10T03:36:05,551 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2710 sec 2024-12-10T03:36:05,551 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 2.2740 sec 2024-12-10T03:36:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:05,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T03:36:05,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:05,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:05,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:05,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:05,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:05,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:05,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f2a514984f26435aa59ffc8d1427ed7e_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801763992/Put/seqid=0 2024-12-10T03:36:05,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742323_1499 (size=12154) 2024-12-10T03:36:05,570 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:05,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801825568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,571 DEBUG [Thread-2134 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4129 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:36:05,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801825568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801825569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,572 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f2a514984f26435aa59ffc8d1427ed7e_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f2a514984f26435aa59ffc8d1427ed7e_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:05,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801825570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,573 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bfa4a18a8083493ca9435ad9d4c556be, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:05,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bfa4a18a8083493ca9435ad9d4c556be is 175, key is test_row_0/A:col10/1733801763992/Put/seqid=0 2024-12-10T03:36:05,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742324_1500 (size=30955) 2024-12-10T03:36:05,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801825672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801825672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801825673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801825874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801825875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801825876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:05,977 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bfa4a18a8083493ca9435ad9d4c556be 2024-12-10T03:36:05,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/2afbe257c6ce4a168956e0135a445db2 is 50, key is test_row_0/B:col10/1733801763992/Put/seqid=0 2024-12-10T03:36:05,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742325_1501 (size=12001) 2024-12-10T03:36:06,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:06,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801826105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:06,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801826178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:06,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801826178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:06,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801826179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:06,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/2afbe257c6ce4a168956e0135a445db2 2024-12-10T03:36:06,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/554db4cc8c8047e6b048c49f356b6404 is 50, key is test_row_0/C:col10/1733801763992/Put/seqid=0 2024-12-10T03:36:06,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742326_1502 (size=12001) 2024-12-10T03:36:06,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801826680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:06,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801826681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:06,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:06,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801826684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:06,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/554db4cc8c8047e6b048c49f356b6404 2024-12-10T03:36:06,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/bfa4a18a8083493ca9435ad9d4c556be as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bfa4a18a8083493ca9435ad9d4c556be 2024-12-10T03:36:06,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bfa4a18a8083493ca9435ad9d4c556be, entries=150, sequenceid=118, filesize=30.2 K 2024-12-10T03:36:06,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/2afbe257c6ce4a168956e0135a445db2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2afbe257c6ce4a168956e0135a445db2 2024-12-10T03:36:06,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2afbe257c6ce4a168956e0135a445db2, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T03:36:06,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/554db4cc8c8047e6b048c49f356b6404 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/554db4cc8c8047e6b048c49f356b6404 2024-12-10T03:36:06,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/554db4cc8c8047e6b048c49f356b6404, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T03:36:06,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 177cb6154ba45b2806835557c21cb1d3 in 1243ms, sequenceid=118, compaction requested=true 2024-12-10T03:36:06,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:06,806 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:36:06,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:36:06,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:06,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:36:06,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:06,806 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:36:06,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:36:06,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:06,807 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123923 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:36:06,807 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/A is initiating minor compaction (all files) 2024-12-10T03:36:06,807 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:36:06,807 DEBUG 
[RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/B is initiating minor compaction (all files) 2024-12-10T03:36:06,807 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/A in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:06,807 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/B in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:06,807 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/f7acc157b40d4f118375ff1add3a8566, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/a456b0c864ba48e4a865292956cf695c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bcb4ec106ffa4a79a1d2e2e23023ef20, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bfa4a18a8083493ca9435ad9d4c556be] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=121.0 K 2024-12-10T03:36:06,807 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/5e3f90414cea4008afb8c0da826c01eb, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2671cc2250014defb97c158f3c9bbe7c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/53eec2f4d55945a2a497c5c55f9d40c0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2afbe257c6ce4a168956e0135a445db2] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=47.0 K 2024-12-10T03:36:06,807 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:06,807 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/f7acc157b40d4f118375ff1add3a8566, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/a456b0c864ba48e4a865292956cf695c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bcb4ec106ffa4a79a1d2e2e23023ef20, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bfa4a18a8083493ca9435ad9d4c556be] 2024-12-10T03:36:06,807 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e3f90414cea4008afb8c0da826c01eb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801760312 2024-12-10T03:36:06,807 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7acc157b40d4f118375ff1add3a8566, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801760312 2024-12-10T03:36:06,808 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2671cc2250014defb97c158f3c9bbe7c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733801761437 2024-12-10T03:36:06,808 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting a456b0c864ba48e4a865292956cf695c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733801761437 2024-12-10T03:36:06,808 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 53eec2f4d55945a2a497c5c55f9d40c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733801763331 2024-12-10T03:36:06,808 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcb4ec106ffa4a79a1d2e2e23023ef20, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733801763331 2024-12-10T03:36:06,808 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 2afbe257c6ce4a168956e0135a445db2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801763975 2024-12-10T03:36:06,808 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfa4a18a8083493ca9435ad9d4c556be, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801763975 2024-12-10T03:36:06,813 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:06,814 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#B#compaction#426 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:06,814 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/8f0373b4148b4eae861d64a47616f08a is 50, key is test_row_0/B:col10/1733801763992/Put/seqid=0 2024-12-10T03:36:06,816 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121082850f3d018944d9a40a9ff65d2a8327_177cb6154ba45b2806835557c21cb1d3 store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:06,817 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121082850f3d018944d9a40a9ff65d2a8327_177cb6154ba45b2806835557c21cb1d3, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:06,817 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121082850f3d018944d9a40a9ff65d2a8327_177cb6154ba45b2806835557c21cb1d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:06,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742327_1503 (size=12241) 2024-12-10T03:36:06,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742328_1504 (size=4469) 2024-12-10T03:36:07,221 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#A#compaction#427 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:07,221 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/184cd040e95c46179f7456d80798b5d8 is 175, key is test_row_0/A:col10/1733801763992/Put/seqid=0 2024-12-10T03:36:07,222 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/8f0373b4148b4eae861d64a47616f08a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/8f0373b4148b4eae861d64a47616f08a 2024-12-10T03:36:07,226 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/B of 177cb6154ba45b2806835557c21cb1d3 into 8f0373b4148b4eae861d64a47616f08a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
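
The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" entries above come from HBase's ExploringCompactionPolicy, which only accepts a candidate set of store files when no single file dwarfs the rest of the set. The sketch below is a simplified, illustrative version of that ratio test; the class and method names are mine (not HBase internals), and the sizes are rounded from the four B-family files selected above.

```java
import java.util.List;

public class RatioCheckSketch {
    /**
     * Simplified form of the check ExploringCompactionPolicy applies to a
     * candidate set: every file must be no larger than `ratio` times the
     * combined size of the other files in the set.
     */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the four B-family file sizes selected above (bytes, ~48107 total).
        List<Long> sizes = List.of(12_100L, 12_000L, 12_000L, 12_007L);
        // With the default hbase.hstore.compaction.ratio of 1.2 this set passes,
        // matching the 4-file selections logged for the A, B and C stores.
        System.out.println(filesInRatio(sizes, 1.2));
    }
}
```
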
2024-12-10T03:36:07,226 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:07,226 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/B, priority=12, startTime=1733801766806; duration=0sec 2024-12-10T03:36:07,226 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:07,226 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:B 2024-12-10T03:36:07,226 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:36:07,227 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:36:07,227 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/C is initiating minor compaction (all files) 2024-12-10T03:36:07,227 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/C in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:07,227 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b220544c2bb7499bbc6c9c3c5a3659ac, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d34a1217c262468c8922e34ea8db4c65, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c8b3f44878964112b4f4abcd9bc9a6ee, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/554db4cc8c8047e6b048c49f356b6404] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=47.0 K 2024-12-10T03:36:07,227 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting b220544c2bb7499bbc6c9c3c5a3659ac, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733801760312 2024-12-10T03:36:07,228 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d34a1217c262468c8922e34ea8db4c65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733801761437 2024-12-10T03:36:07,228 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c8b3f44878964112b4f4abcd9bc9a6ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=92, earliestPutTs=1733801763331 2024-12-10T03:36:07,228 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 554db4cc8c8047e6b048c49f356b6404, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801763975 2024-12-10T03:36:07,249 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#C#compaction#428 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:07,249 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/10af33a326e64269b36a33200c9c3030 is 50, key is test_row_0/C:col10/1733801763992/Put/seqid=0 2024-12-10T03:36:07,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742329_1505 (size=31195) 2024-12-10T03:36:07,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742330_1506 (size=12241) 2024-12-10T03:36:07,255 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/184cd040e95c46179f7456d80798b5d8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/184cd040e95c46179f7456d80798b5d8 2024-12-10T03:36:07,258 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/A of 177cb6154ba45b2806835557c21cb1d3 into 184cd040e95c46179f7456d80798b5d8(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
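
The PressureAwareThroughputController entries ("average throughput is ... total limit is 53.85 MB/second") show compaction I/O being throttled between a lower and a higher throughput bound, scaled by memstore pressure. A hedged sketch of tuning those bounds follows; the configuration key names are my assumption based on that controller's settings and should be verified against the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Throughput ceiling applied under heavy memstore pressure (bytes/sec).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        // Throughput floor applied when the region server is mostly idle (bytes/sec).
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println("higher.bound=" +
            conf.get("hbase.hstore.compaction.throughput.higher.bound"));
    }
}
```
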
2024-12-10T03:36:07,258 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:07,258 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/A, priority=12, startTime=1733801766806; duration=0sec 2024-12-10T03:36:07,258 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:07,258 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:A 2024-12-10T03:36:07,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-10T03:36:07,382 INFO [Thread-2140 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-10T03:36:07,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:36:07,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-10T03:36:07,384 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:36:07,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T03:36:07,384 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:36:07,384 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:36:07,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T03:36:07,535 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:07,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-10T03:36:07,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
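
The pid=153/155 procedures above are table flushes requested by the test client ("Client=jenkins ... flush TestAcidGuarantees"): the master stores a FlushTableProcedure and fans out a FlushRegionProcedure to the region server hosting the region. A minimal sketch of the client call that triggers this path, assuming a standard connection setup (only the table name is taken from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits a flush procedure on the master and waits for it to
            // finish, matching the "Operation: FLUSH ... completed" line above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```
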
2024-12-10T03:36:07,536 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T03:36:07,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:07,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:07,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:07,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:07,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:07,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:07,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103bf2bac8681d4b028b2dc1aa7b0ceebc_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801765568/Put/seqid=0 2024-12-10T03:36:07,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742331_1507 (size=12154) 2024-12-10T03:36:07,656 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/10af33a326e64269b36a33200c9c3030 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/10af33a326e64269b36a33200c9c3030 2024-12-10T03:36:07,659 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/C of 177cb6154ba45b2806835557c21cb1d3 into 10af33a326e64269b36a33200c9c3030(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
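
The DefaultMobStoreFlusher entries above, and the temporary writer created under .../mobdir/.tmp/..., indicate that the column families in this test are MOB-enabled: values larger than the family's MOB threshold are flushed into separate MOB files under the /mobdir tree rather than into the regular store files. A hedged sketch of how such a family can be declared follows; the table name and threshold value are illustrative and not taken from the test's actual schema.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("MobExample"));
            // Cells larger than the threshold are written to MOB files under
            // /mobdir at flush time instead of the family's regular store files.
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100 * 1024)   // 100 KB, illustrative value
                .build());
            admin.createTable(table.build());
        }
    }
}
```
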
2024-12-10T03:36:07,659 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:07,659 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/C, priority=12, startTime=1733801766806; duration=0sec 2024-12-10T03:36:07,659 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:07,659 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:C 2024-12-10T03:36:07,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:07,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:07,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T03:36:07,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:07,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801827699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:07,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:07,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801827700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:07,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:07,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801827701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:07,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:07,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801827801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:07,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:07,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801827802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:07,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:07,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801827802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:07,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:07,950 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103bf2bac8681d4b028b2dc1aa7b0ceebc_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103bf2bac8681d4b028b2dc1aa7b0ceebc_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:07,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/0ecd2c1dbb3a42ffb76a3c4310633207, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:07,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/0ecd2c1dbb3a42ffb76a3c4310633207 is 175, key is test_row_0/A:col10/1733801765568/Put/seqid=0 2024-12-10T03:36:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742332_1508 (size=30955) 2024-12-10T03:36:07,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T03:36:08,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801828003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801828004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801828004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801828125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,127 DEBUG [Thread-2130 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4134 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:36:08,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801828306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801828307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801828307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,354 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/0ecd2c1dbb3a42ffb76a3c4310633207 2024-12-10T03:36:08,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/e1995715dcf641ada3dc05de820d52b0 is 50, key is test_row_0/B:col10/1733801765568/Put/seqid=0 2024-12-10T03:36:08,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742333_1509 (size=12001) 2024-12-10T03:36:08,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T03:36:08,786 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/e1995715dcf641ada3dc05de820d52b0 2024-12-10T03:36:08,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ada165946ead4507a0fc00162926d574 is 50, key is test_row_0/C:col10/1733801765568/Put/seqid=0 2024-12-10T03:36:08,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742334_1510 (size=12001) 2024-12-10T03:36:08,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801828809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801828811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:08,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:08,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801828812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,194 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ada165946ead4507a0fc00162926d574 2024-12-10T03:36:09,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/0ecd2c1dbb3a42ffb76a3c4310633207 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/0ecd2c1dbb3a42ffb76a3c4310633207 2024-12-10T03:36:09,200 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/0ecd2c1dbb3a42ffb76a3c4310633207, entries=150, sequenceid=130, filesize=30.2 K 2024-12-10T03:36:09,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/e1995715dcf641ada3dc05de820d52b0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e1995715dcf641ada3dc05de820d52b0 2024-12-10T03:36:09,203 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e1995715dcf641ada3dc05de820d52b0, entries=150, sequenceid=130, filesize=11.7 K 2024-12-10T03:36:09,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ada165946ead4507a0fc00162926d574 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ada165946ead4507a0fc00162926d574 2024-12-10T03:36:09,206 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ada165946ead4507a0fc00162926d574, entries=150, sequenceid=130, filesize=11.7 K 2024-12-10T03:36:09,207 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 177cb6154ba45b2806835557c21cb1d3 in 1671ms, sequenceid=130, compaction requested=false 2024-12-10T03:36:09,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:09,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:09,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-10T03:36:09,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-10T03:36:09,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-10T03:36:09,208 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8240 sec 2024-12-10T03:36:09,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 1.8250 sec 2024-12-10T03:36:09,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T03:36:09,487 INFO [Thread-2140 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-10T03:36:09,488 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:36:09,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-10T03:36:09,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T03:36:09,489 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:36:09,489 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:36:09,489 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:36:09,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T03:36:09,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:09,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T03:36:09,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:09,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:09,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:09,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:09,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:09,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:09,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e495232d75d2411d93182a2fb5d933ee_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801767700/Put/seqid=0 2024-12-10T03:36:09,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742335_1511 (size=12304) 2024-12-10T03:36:09,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:09,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801829621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,640 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:09,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:09,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:09,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:09,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:09,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:09,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:09,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801829723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T03:36:09,792 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:09,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:09,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:09,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:09,793 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:09,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:09,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:09,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:09,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801829818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:09,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801829818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:09,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801829821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801829926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,944 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:09,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:09,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:09,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:09,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:09,945 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:09,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,019 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:10,022 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e495232d75d2411d93182a2fb5d933ee_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e495232d75d2411d93182a2fb5d933ee_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:10,022 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/607cf32993c545b5860f319473ae2e06, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:10,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/607cf32993c545b5860f319473ae2e06 is 175, key is test_row_0/A:col10/1733801767700/Put/seqid=0 2024-12-10T03:36:10,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742336_1512 (size=31105) 2024-12-10T03:36:10,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T03:36:10,096 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:10,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:10,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:10,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:10,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801830228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,248 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,249 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,400 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:10,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:10,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:10,431 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/607cf32993c545b5860f319473ae2e06 2024-12-10T03:36:10,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/64a8755427634d92b95684d7b5a33a45 is 50, key is test_row_0/B:col10/1733801767700/Put/seqid=0 2024-12-10T03:36:10,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742337_1513 (size=12151) 2024-12-10T03:36:10,552 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:10,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:10,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:10,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T03:36:10,704 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,705 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:10,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801830731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/64a8755427634d92b95684d7b5a33a45 2024-12-10T03:36:10,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/c95c4f1fbdf4428eb03f7b3bc9106cf7 is 50, key is test_row_0/C:col10/1733801767700/Put/seqid=0 2024-12-10T03:36:10,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742338_1514 (size=12151) 2024-12-10T03:36:10,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:10,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
as already flushing 2024-12-10T03:36:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:10,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:10,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:11,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:11,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:11,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:11,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:11,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:11,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:11,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:11,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:11,160 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:11,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:11,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:11,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:11,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:11,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:11,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:11,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:11,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/c95c4f1fbdf4428eb03f7b3bc9106cf7 2024-12-10T03:36:11,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/607cf32993c545b5860f319473ae2e06 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/607cf32993c545b5860f319473ae2e06 2024-12-10T03:36:11,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/607cf32993c545b5860f319473ae2e06, entries=150, sequenceid=158, filesize=30.4 K 2024-12-10T03:36:11,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/64a8755427634d92b95684d7b5a33a45 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/64a8755427634d92b95684d7b5a33a45 2024-12-10T03:36:11,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/64a8755427634d92b95684d7b5a33a45, entries=150, 
sequenceid=158, filesize=11.9 K 2024-12-10T03:36:11,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/c95c4f1fbdf4428eb03f7b3bc9106cf7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c95c4f1fbdf4428eb03f7b3bc9106cf7 2024-12-10T03:36:11,268 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c95c4f1fbdf4428eb03f7b3bc9106cf7, entries=150, sequenceid=158, filesize=11.9 K 2024-12-10T03:36:11,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 177cb6154ba45b2806835557c21cb1d3 in 1659ms, sequenceid=158, compaction requested=true 2024-12-10T03:36:11,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:11,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:36:11,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:11,269 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:11,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:36:11,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:11,269 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:11,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:36:11,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93255 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] 
regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/B is initiating minor compaction (all files) 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/A is initiating minor compaction (all files) 2024-12-10T03:36:11,270 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/A in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:11,270 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/B in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:11,270 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/8f0373b4148b4eae861d64a47616f08a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e1995715dcf641ada3dc05de820d52b0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/64a8755427634d92b95684d7b5a33a45] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=35.5 K 2024-12-10T03:36:11,270 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/184cd040e95c46179f7456d80798b5d8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/0ecd2c1dbb3a42ffb76a3c4310633207, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/607cf32993c545b5860f319473ae2e06] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=91.1 K 2024-12-10T03:36:11,270 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/184cd040e95c46179f7456d80798b5d8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/0ecd2c1dbb3a42ffb76a3c4310633207, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/607cf32993c545b5860f319473ae2e06] 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f0373b4148b4eae861d64a47616f08a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801763975 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 184cd040e95c46179f7456d80798b5d8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801763975 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting e1995715dcf641ada3dc05de820d52b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801765566 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ecd2c1dbb3a42ffb76a3c4310633207, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801765566 2024-12-10T03:36:11,270 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 64a8755427634d92b95684d7b5a33a45, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733801767696 2024-12-10T03:36:11,271 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 607cf32993c545b5860f319473ae2e06, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733801767696 2024-12-10T03:36:11,274 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:11,275 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#B#compaction#435 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:11,275 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/c283b59db7d94b799adb99b3c2e606c3 is 50, key is test_row_0/B:col10/1733801767700/Put/seqid=0 2024-12-10T03:36:11,276 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210b8ea396a098b428bb490bbe85ccb4c9d_177cb6154ba45b2806835557c21cb1d3 store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:11,277 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210b8ea396a098b428bb490bbe85ccb4c9d_177cb6154ba45b2806835557c21cb1d3, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:11,277 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b8ea396a098b428bb490bbe85ccb4c9d_177cb6154ba45b2806835557c21cb1d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:11,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742339_1515 (size=12493) 2024-12-10T03:36:11,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742340_1516 (size=4469) 2024-12-10T03:36:11,312 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:11,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T03:36:11,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:11,313 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-10T03:36:11,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:11,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:11,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:11,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:11,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:11,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:11,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a6964d38e0fd4072a37a5b9630b78ae4_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801769611/Put/seqid=0 2024-12-10T03:36:11,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742341_1517 (size=12304) 2024-12-10T03:36:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T03:36:11,681 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#A#compaction#436 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:11,681 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/eb704ff2b63d4effaff7ab39b268a76b is 175, key is test_row_0/A:col10/1733801767700/Put/seqid=0 2024-12-10T03:36:11,684 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/c283b59db7d94b799adb99b3c2e606c3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/c283b59db7d94b799adb99b3c2e606c3 2024-12-10T03:36:11,687 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/B of 177cb6154ba45b2806835557c21cb1d3 into c283b59db7d94b799adb99b3c2e606c3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:36:11,687 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:11,687 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/B, priority=13, startTime=1733801771269; duration=0sec 2024-12-10T03:36:11,687 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:11,687 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:B 2024-12-10T03:36:11,687 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:11,688 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:11,688 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/C is initiating minor compaction (all files) 2024-12-10T03:36:11,688 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/C in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:11,688 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/10af33a326e64269b36a33200c9c3030, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ada165946ead4507a0fc00162926d574, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c95c4f1fbdf4428eb03f7b3bc9106cf7] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=35.5 K 2024-12-10T03:36:11,688 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 10af33a326e64269b36a33200c9c3030, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733801763975 2024-12-10T03:36:11,689 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ada165946ead4507a0fc00162926d574, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733801765566 2024-12-10T03:36:11,689 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c95c4f1fbdf4428eb03f7b3bc9106cf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733801767696 2024-12-10T03:36:11,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742342_1518 (size=31447) 2024-12-10T03:36:11,695 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#C#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:11,695 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ee9f1406e70241eca292b191e3b87a8c is 50, key is test_row_0/C:col10/1733801767700/Put/seqid=0 2024-12-10T03:36:11,695 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/eb704ff2b63d4effaff7ab39b268a76b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/eb704ff2b63d4effaff7ab39b268a76b 2024-12-10T03:36:11,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742343_1519 (size=12493) 2024-12-10T03:36:11,700 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/A of 177cb6154ba45b2806835557c21cb1d3 into eb704ff2b63d4effaff7ab39b268a76b(size=30.7 K), total size for store is 30.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:36:11,700 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:11,700 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/A, priority=13, startTime=1733801771269; duration=0sec 2024-12-10T03:36:11,700 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:11,700 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:A 2024-12-10T03:36:11,702 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ee9f1406e70241eca292b191e3b87a8c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ee9f1406e70241eca292b191e3b87a8c 2024-12-10T03:36:11,706 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/C of 177cb6154ba45b2806835557c21cb1d3 into ee9f1406e70241eca292b191e3b87a8c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:36:11,706 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:11,706 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/C, priority=13, startTime=1733801771269; duration=0sec 2024-12-10T03:36:11,706 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:11,706 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:C 2024-12-10T03:36:11,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:11,725 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a6964d38e0fd4072a37a5b9630b78ae4_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a6964d38e0fd4072a37a5b9630b78ae4_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:11,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/11253e94e7ca44b3b30de746c9691875, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:11,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/11253e94e7ca44b3b30de746c9691875 is 175, key is test_row_0/A:col10/1733801769611/Put/seqid=0 2024-12-10T03:36:11,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742344_1520 (size=31105) 2024-12-10T03:36:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:11,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:11,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801831772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:11,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:11,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801831830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:11,832 DEBUG [Thread-2138 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:36:11,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801831837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:11,838 DEBUG [Thread-2132 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:36:11,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801831839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:11,839 DEBUG [Thread-2136 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:36:11,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:11,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801831874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:12,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:12,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801832076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:12,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:12,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52762 deadline: 1733801832129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:12,131 DEBUG [Thread-2130 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:36:12,138 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=167, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/11253e94e7ca44b3b30de746c9691875 2024-12-10T03:36:12,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/732f1440f3764e59b5aee86f2dfc5670 is 50, key is test_row_0/B:col10/1733801769611/Put/seqid=0 2024-12-10T03:36:12,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742345_1521 (size=12151) 2024-12-10T03:36:12,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:12,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801832379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:12,545 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/732f1440f3764e59b5aee86f2dfc5670 2024-12-10T03:36:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/d33285621d8c4793b83bd3ff8bfb3556 is 50, key is test_row_0/C:col10/1733801769611/Put/seqid=0 2024-12-10T03:36:12,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742346_1522 (size=12151) 2024-12-10T03:36:12,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:12,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801832884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:12,953 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/d33285621d8c4793b83bd3ff8bfb3556 2024-12-10T03:36:12,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/11253e94e7ca44b3b30de746c9691875 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/11253e94e7ca44b3b30de746c9691875 2024-12-10T03:36:12,959 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/11253e94e7ca44b3b30de746c9691875, entries=150, sequenceid=167, filesize=30.4 K 2024-12-10T03:36:12,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/732f1440f3764e59b5aee86f2dfc5670 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/732f1440f3764e59b5aee86f2dfc5670 2024-12-10T03:36:12,962 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/732f1440f3764e59b5aee86f2dfc5670, entries=150, sequenceid=167, filesize=11.9 K 2024-12-10T03:36:12,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/d33285621d8c4793b83bd3ff8bfb3556 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d33285621d8c4793b83bd3ff8bfb3556 2024-12-10T03:36:12,965 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d33285621d8c4793b83bd3ff8bfb3556, entries=150, sequenceid=167, filesize=11.9 K 2024-12-10T03:36:12,965 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 177cb6154ba45b2806835557c21cb1d3 in 1652ms, sequenceid=167, compaction requested=false 2024-12-10T03:36:12,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:12,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:12,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-10T03:36:12,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-10T03:36:12,967 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-10T03:36:12,968 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.4770 sec 2024-12-10T03:36:12,968 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 3.4800 sec 2024-12-10T03:36:13,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T03:36:13,593 INFO [Thread-2140 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-10T03:36:13,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:36:13,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-10T03:36:13,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T03:36:13,595 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:36:13,595 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:36:13,595 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:36:13,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T03:36:13,746 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:13,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T03:36:13,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:13,747 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T03:36:13,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:13,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:13,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:13,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:13,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:13,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:13,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ea53061d404f462182df42abadee98bb_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801771769/Put/seqid=0 2024-12-10T03:36:13,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742347_1523 (size=12304) 2024-12-10T03:36:13,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:13,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T03:36:13,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801833897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:14,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:14,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801833999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:14,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:14,158 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ea53061d404f462182df42abadee98bb_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ea53061d404f462182df42abadee98bb_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:14,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/99cda2e3a0304049806850e8f832bbc3, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:14,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/99cda2e3a0304049806850e8f832bbc3 is 175, key is test_row_0/A:col10/1733801771769/Put/seqid=0 2024-12-10T03:36:14,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742348_1524 (size=31105) 2024-12-10T03:36:14,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T03:36:14,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:14,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801834201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:14,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:14,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801834503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:14,562 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/99cda2e3a0304049806850e8f832bbc3 2024-12-10T03:36:14,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/d682667becec4758b331355f6e410303 is 50, key is test_row_0/B:col10/1733801771769/Put/seqid=0 2024-12-10T03:36:14,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742349_1525 (size=12151) 2024-12-10T03:36:14,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T03:36:14,970 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/d682667becec4758b331355f6e410303 2024-12-10T03:36:14,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/25d15f492af4497c99d7bef275fcebd3 is 50, key is test_row_0/C:col10/1733801771769/Put/seqid=0 2024-12-10T03:36:14,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742350_1526 (size=12151) 2024-12-10T03:36:15,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:15,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801835007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:15,377 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/25d15f492af4497c99d7bef275fcebd3 2024-12-10T03:36:15,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/99cda2e3a0304049806850e8f832bbc3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/99cda2e3a0304049806850e8f832bbc3 2024-12-10T03:36:15,382 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/99cda2e3a0304049806850e8f832bbc3, entries=150, sequenceid=197, filesize=30.4 K 2024-12-10T03:36:15,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/d682667becec4758b331355f6e410303 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/d682667becec4758b331355f6e410303 2024-12-10T03:36:15,386 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/d682667becec4758b331355f6e410303, entries=150, sequenceid=197, filesize=11.9 K 2024-12-10T03:36:15,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/25d15f492af4497c99d7bef275fcebd3 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/25d15f492af4497c99d7bef275fcebd3 2024-12-10T03:36:15,389 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/25d15f492af4497c99d7bef275fcebd3, entries=150, sequenceid=197, filesize=11.9 K 2024-12-10T03:36:15,390 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 177cb6154ba45b2806835557c21cb1d3 in 1643ms, sequenceid=197, compaction requested=true 2024-12-10T03:36:15,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:15,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:15,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-10T03:36:15,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-10T03:36:15,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-10T03:36:15,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7960 sec 2024-12-10T03:36:15,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.7980 sec 2024-12-10T03:36:15,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T03:36:15,698 INFO [Thread-2140 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-10T03:36:15,699 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T03:36:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-10T03:36:15,700 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T03:36:15,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T03:36:15,700 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T03:36:15,700 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T03:36:15,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T03:36:15,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:15,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T03:36:15,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:15,851 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:15,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T03:36:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:15,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:15,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:15,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:15,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:15,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bdce5d9babf942d4b41e45612a92c82e_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801775843/Put/seqid=0 2024-12-10T03:36:15,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742351_1527 (size=14794) 2024-12-10T03:36:15,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:15,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801835883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:15,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:15,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:15,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801835884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:15,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801835884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:15,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:15,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801835986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:15,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:15,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801835988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:15,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:15,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801835988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T03:36:16,003 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T03:36:16,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:16,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:16,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801836011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T03:36:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:16,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:16,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801836187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:16,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801836189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:16,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801836190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,260 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:16,262 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bdce5d9babf942d4b41e45612a92c82e_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bdce5d9babf942d4b41e45612a92c82e_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:16,263 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/316da7b3b1dd4f5d8414a7bc5c6d35d2, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:16,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/316da7b3b1dd4f5d8414a7bc5c6d35d2 is 175, key is test_row_0/A:col10/1733801775843/Put/seqid=0 2024-12-10T03:36:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742352_1528 (size=39749) 2024-12-10T03:36:16,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T03:36:16,307 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T03:36:16,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:16,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:16,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T03:36:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T03:36:16,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
as already flushing 2024-12-10T03:36:16,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,460 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801836491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:16,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801836493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:16,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801836494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,612 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T03:36:16,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:16,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T03:36:16,667 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=208, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/316da7b3b1dd4f5d8414a7bc5c6d35d2 2024-12-10T03:36:16,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/de5b1f323b6c443dbed7da498f4387f1 is 50, key is test_row_0/B:col10/1733801775843/Put/seqid=0 2024-12-10T03:36:16,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742353_1529 (size=12151) 2024-12-10T03:36:16,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/de5b1f323b6c443dbed7da498f4387f1 2024-12-10T03:36:16,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/b62b35a170384e56b2a5c60144dd66d7 is 50, key is test_row_0/C:col10/1733801775843/Put/seqid=0 2024-12-10T03:36:16,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742354_1530 (size=12151) 2024-12-10T03:36:16,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/b62b35a170384e56b2a5c60144dd66d7 2024-12-10T03:36:16,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/316da7b3b1dd4f5d8414a7bc5c6d35d2 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/316da7b3b1dd4f5d8414a7bc5c6d35d2 2024-12-10T03:36:16,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/316da7b3b1dd4f5d8414a7bc5c6d35d2, entries=200, sequenceid=208, filesize=38.8 K 2024-12-10T03:36:16,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/de5b1f323b6c443dbed7da498f4387f1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/de5b1f323b6c443dbed7da498f4387f1 2024-12-10T03:36:16,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/de5b1f323b6c443dbed7da498f4387f1, entries=150, sequenceid=208, filesize=11.9 K 2024-12-10T03:36:16,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/b62b35a170384e56b2a5c60144dd66d7 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b62b35a170384e56b2a5c60144dd66d7 2024-12-10T03:36:16,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b62b35a170384e56b2a5c60144dd66d7, entries=150, sequenceid=208, filesize=11.9 K 2024-12-10T03:36:16,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 177cb6154ba45b2806835557c21cb1d3 in 872ms, sequenceid=208, compaction requested=true 2024-12-10T03:36:16,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:16,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:36:16,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:16,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:36:16,719 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:36:16,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:16,719 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:36:16,719 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:36:16,719 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:16,720 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133406 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:36:16,720 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:36:16,720 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/B is initiating minor compaction (all files) 2024-12-10T03:36:16,720 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/A is initiating minor compaction (all files) 2024-12-10T03:36:16,720 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/A in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,720 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/B in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,720 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/eb704ff2b63d4effaff7ab39b268a76b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/11253e94e7ca44b3b30de746c9691875, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/99cda2e3a0304049806850e8f832bbc3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/316da7b3b1dd4f5d8414a7bc5c6d35d2] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=130.3 K 2024-12-10T03:36:16,720 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/c283b59db7d94b799adb99b3c2e606c3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/732f1440f3764e59b5aee86f2dfc5670, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/d682667becec4758b331355f6e410303, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/de5b1f323b6c443dbed7da498f4387f1] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=47.8 K 2024-12-10T03:36:16,720 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,720 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/eb704ff2b63d4effaff7ab39b268a76b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/11253e94e7ca44b3b30de746c9691875, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/99cda2e3a0304049806850e8f832bbc3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/316da7b3b1dd4f5d8414a7bc5c6d35d2] 2024-12-10T03:36:16,720 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting c283b59db7d94b799adb99b3c2e606c3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733801767696 2024-12-10T03:36:16,720 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb704ff2b63d4effaff7ab39b268a76b, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733801767696 2024-12-10T03:36:16,721 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 732f1440f3764e59b5aee86f2dfc5670, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733801769611 2024-12-10T03:36:16,721 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11253e94e7ca44b3b30de746c9691875, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733801769611 2024-12-10T03:36:16,721 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d682667becec4758b331355f6e410303, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733801771754 2024-12-10T03:36:16,721 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99cda2e3a0304049806850e8f832bbc3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733801771754 2024-12-10T03:36:16,721 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting de5b1f323b6c443dbed7da498f4387f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, 
earliestPutTs=1733801773890 2024-12-10T03:36:16,721 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 316da7b3b1dd4f5d8414a7bc5c6d35d2, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733801773890 2024-12-10T03:36:16,726 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:16,727 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#B#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:16,727 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/75fb33d513fd4d67a17cdcb5542c9e5a is 50, key is test_row_0/B:col10/1733801775843/Put/seqid=0 2024-12-10T03:36:16,728 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210075f006267694e3ab1bd16ecf37e0ee0_177cb6154ba45b2806835557c21cb1d3 store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:16,730 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210075f006267694e3ab1bd16ecf37e0ee0_177cb6154ba45b2806835557c21cb1d3, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:16,730 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210075f006267694e3ab1bd16ecf37e0ee0_177cb6154ba45b2806835557c21cb1d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:16,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742355_1531 (size=12629) 2024-12-10T03:36:16,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742356_1532 (size=4469) 2024-12-10T03:36:16,735 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/75fb33d513fd4d67a17cdcb5542c9e5a as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/75fb33d513fd4d67a17cdcb5542c9e5a 2024-12-10T03:36:16,739 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/B of 177cb6154ba45b2806835557c21cb1d3 into 75fb33d513fd4d67a17cdcb5542c9e5a(size=12.3 K), total 
size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:36:16,739 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:16,739 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/B, priority=12, startTime=1733801776719; duration=0sec 2024-12-10T03:36:16,739 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:16,739 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:B 2024-12-10T03:36:16,739 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T03:36:16,740 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T03:36:16,740 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/C is initiating minor compaction (all files) 2024-12-10T03:36:16,740 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/C in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
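The selection records here ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking") are driven by the region server's store-file thresholds. A minimal sketch of the configuration keys involved follows; the key names are standard HBase properties, but the values are illustrative assumptions and are not taken from this test run (except that 16 matches the default blocking-store-files figure reported above).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the store-file thresholds behind the selection records above.
// Only the key names are standard HBase properties; values are illustrative.
public final class CompactionSelectionConf {
    public static Configuration sketch() {
        Configuration conf = HBaseConfiguration.create();
        // Fewest files ExploringCompactionPolicy will consider for a minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Most files a single minor compaction may include.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further flushes block; corresponds to the
        // "16 blocking" figure in the selection records.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }

    private CompactionSelectionConf() {}
}
```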
2024-12-10T03:36:16,740 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ee9f1406e70241eca292b191e3b87a8c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d33285621d8c4793b83bd3ff8bfb3556, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/25d15f492af4497c99d7bef275fcebd3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b62b35a170384e56b2a5c60144dd66d7] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=47.8 K 2024-12-10T03:36:16,740 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting ee9f1406e70241eca292b191e3b87a8c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733801767696 2024-12-10T03:36:16,741 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting d33285621d8c4793b83bd3ff8bfb3556, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1733801769611 2024-12-10T03:36:16,741 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 25d15f492af4497c99d7bef275fcebd3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733801771754 2024-12-10T03:36:16,741 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting b62b35a170384e56b2a5c60144dd66d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733801773890 2024-12-10T03:36:16,748 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#C#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:16,749 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/a67465f001894b12b3e8bfb87d91c91d is 50, key is test_row_0/C:col10/1733801775843/Put/seqid=0 2024-12-10T03:36:16,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742357_1533 (size=12629) 2024-12-10T03:36:16,764 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:16,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37553 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T03:36:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:16,764 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T03:36:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:16,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:16,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bb84b825399242a484113af8899f9f55_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801775879/Put/seqid=0 2024-12-10T03:36:16,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742358_1534 (size=12304) 2024-12-10T03:36:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=161 2024-12-10T03:36:16,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. as already flushing 2024-12-10T03:36:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:17,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801837029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801837029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801837029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,134 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#A#compaction#448 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801837132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801837132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801837132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,135 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/4b9313a27db9419985a813d5a54e0ccb is 175, key is test_row_0/A:col10/1733801775843/Put/seqid=0 2024-12-10T03:36:17,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742359_1535 (size=31583) 2024-12-10T03:36:17,162 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/a67465f001894b12b3e8bfb87d91c91d as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/a67465f001894b12b3e8bfb87d91c91d 2024-12-10T03:36:17,165 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/C of 177cb6154ba45b2806835557c21cb1d3 into a67465f001894b12b3e8bfb87d91c91d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
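The RegionTooBusyException records surrounding this compaction come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking size, which is the memstore flush size multiplied by the block multiplier; the log shows that product as 512 KB here. The sketch below uses the standard key names, while the specific values are assumptions chosen only to illustrate one way a 512 KB blocking limit can arise, not the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative decomposition of the 512 KB blocking limit seen in the
// "Over memstore limit=512.0 K" messages: 128 KB flush size * multiplier 4.
// Values are assumptions; the key names are standard HBase properties.
public final class MemstoreLimitConf {
    public static Configuration sketch() {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore size that triggers a flush.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // Writes are refused (RegionTooBusyException) once the memstore
        // reaches flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }

    private MemstoreLimitConf() {}
}
```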
2024-12-10T03:36:17,166 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:17,166 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/C, priority=12, startTime=1733801776719; duration=0sec 2024-12-10T03:36:17,166 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:17,166 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:C 2024-12-10T03:36:17,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:17,181 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bb84b825399242a484113af8899f9f55_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bb84b825399242a484113af8899f9f55_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:17,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/50e802bebea047099d725a68c669a33e, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:17,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/50e802bebea047099d725a68c669a33e is 175, key is test_row_0/A:col10/1733801775879/Put/seqid=0 2024-12-10T03:36:17,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742360_1536 (size=31105) 2024-12-10T03:36:17,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801837335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801837335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801837336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,541 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/4b9313a27db9419985a813d5a54e0ccb as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/4b9313a27db9419985a813d5a54e0ccb 2024-12-10T03:36:17,544 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/A of 177cb6154ba45b2806835557c21cb1d3 into 4b9313a27db9419985a813d5a54e0ccb(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
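On the client side these rejections surface as retryable IOExceptions; the RpcRetryingCallerImpl record further down ("tries=6, retries=16") shows the test's writer thread retrying the same put against row 'test_row_1'. Below is a minimal, hypothetical writer sketch that issues such a put and lets the client's built-in retry machinery absorb RegionTooBusyException. The table name, row, family and qualifier mirror names that appear in these log records; the retry settings are illustrative assumptions, not the test's own configuration.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical writer against the TestAcidGuarantees table; row, family and
// qualifier are taken from names visible in the log, everything else is assumed.
public final class BusyRegionWriter {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative retry settings; RegionTooBusyException responses are
        // retried internally by RpcRetryingCallerImpl before put() fails.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Throws only after the configured retries are exhausted.
            table.put(put);
        }
    }

    private BusyRegionWriter() {}
}
```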
2024-12-10T03:36:17,544 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:17,544 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/A, priority=12, startTime=1733801776719; duration=0sec 2024-12-10T03:36:17,544 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:17,544 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:A 2024-12-10T03:36:17,586 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/50e802bebea047099d725a68c669a33e 2024-12-10T03:36:17,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/e2ba030d80fe4d8f878461728a10c872 is 50, key is test_row_0/B:col10/1733801775879/Put/seqid=0 2024-12-10T03:36:17,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742361_1537 (size=12151) 2024-12-10T03:36:17,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801837637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801837638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:17,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801837638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T03:36:18,000 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/e2ba030d80fe4d8f878461728a10c872 2024-12-10T03:36:18,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/e6c934a0a9aa484793a412698cd67e9b is 50, key is test_row_0/C:col10/1733801775879/Put/seqid=0 2024-12-10T03:36:18,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742362_1538 (size=12151) 2024-12-10T03:36:18,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:18,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52806 deadline: 1733801838019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:18,020 DEBUG [Thread-2134 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4123 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., hostname=50b9ef1c5472,37553,1733801610862, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T03:36:18,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52754 deadline: 1733801838140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:18,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:18,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52804 deadline: 1733801838142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:18,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T03:36:18,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52752 deadline: 1733801838143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:18,419 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/e6c934a0a9aa484793a412698cd67e9b 2024-12-10T03:36:18,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/50e802bebea047099d725a68c669a33e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/50e802bebea047099d725a68c669a33e 2024-12-10T03:36:18,424 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/50e802bebea047099d725a68c669a33e, entries=150, sequenceid=234, filesize=30.4 K 2024-12-10T03:36:18,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/e2ba030d80fe4d8f878461728a10c872 as 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e2ba030d80fe4d8f878461728a10c872 2024-12-10T03:36:18,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,427 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e2ba030d80fe4d8f878461728a10c872, entries=150, sequenceid=234, filesize=11.9 K 2024-12-10T03:36:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/e6c934a0a9aa484793a412698cd67e9b as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/e6c934a0a9aa484793a412698cd67e9b 2024-12-10T03:36:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,430 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/e6c934a0a9aa484793a412698cd67e9b, entries=150, sequenceid=234, filesize=11.9 K 2024-12-10T03:36:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,431 INFO [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 177cb6154ba45b2806835557c21cb1d3 in 1667ms, sequenceid=234, compaction requested=false 2024-12-10T03:36:18,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:18,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:18,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/50b9ef1c5472:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-10T03:36:18,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-10T03:36:18,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-10T03:36:18,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7320 sec 2024-12-10T03:36:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,434 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.7340 sec 2024-12-10T03:36:18,434 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,479 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,483 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,486 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,491 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,494 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,496 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,500 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,503 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,506 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,509 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,512 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,514 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,517 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,520 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122), emitted alternately by RpcServer.default.FPBQ.Fifo handlers 0 and 1 (queue=0, port=37553) between 2024-12-10T03:36:18,521 and 2024-12-10T03:36:18,582 ...] 2024-12-10T03:36:18,582 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,586 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,589 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,591 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,594 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,597 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,599 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,602 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,604 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,609 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,611 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,614 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,617 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,619 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,683 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,686 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,689 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,693 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,696 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,699 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,702 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,704 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,707 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,710 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,713 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,717 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,719 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,722 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T03:36:18,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T03:36:18,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T03:36:18,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T03:36:18,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T03:36:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T03:36:18,796 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,799 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,801 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,806 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,809 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,812 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,818 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,821 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,823 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,829 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,832 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,835 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,838 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,841 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,844 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,846 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,850 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,854 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,857 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,860 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,863 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,866 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,875 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,877 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,881 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,883 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,886 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,889 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,894 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,897 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,900 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,902 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,905 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,908 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,912 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,915 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,919 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,922 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,925 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,929 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,931 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,936 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,939 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,942 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,945 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,948 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,951 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,954 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,958 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,962 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,965 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,968 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,971 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,975 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,978 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,983 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,986 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,989 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,993 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,997 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:18,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,000 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,003 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,010 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,012 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,082 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,085 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,087 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,090 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,101 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,104 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,107 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,109 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,113 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,117 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,119 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,124 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,127 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,129 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,132 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,135 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,142 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:19,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T03:36:19,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:19,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:19,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:19,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:19,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:19,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:19,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121013323b65a2494825834200f1a6b3e324_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801779144/Put/seqid=0 2024-12-10T03:36:19,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742363_1539 (size=12304) 2024-12-10T03:36:19,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,161 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,164 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,165 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121013323b65a2494825834200f1a6b3e324_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121013323b65a2494825834200f1a6b3e324_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:19,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,166 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c9013de7c2e244468d724d9dca6b73f1, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:19,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c9013de7c2e244468d724d9dca6b73f1 is 175, key is test_row_0/A:col10/1733801779144/Put/seqid=0 2024-12-10T03:36:19,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37553 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:19,170 DEBUG [Thread-2136 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:51621 2024-12-10T03:36:19,170 DEBUG [Thread-2138 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x5b914bf4 to 127.0.0.1:51621 2024-12-10T03:36:19,170 DEBUG [Thread-2136 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,170 DEBUG [Thread-2138 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,170 DEBUG [Thread-2147 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:51621 2024-12-10T03:36:19,170 DEBUG [Thread-2147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,171 DEBUG [Thread-2141 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:51621 2024-12-10T03:36:19,171 DEBUG [Thread-2141 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,172 DEBUG [Thread-2149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:51621 2024-12-10T03:36:19,172 DEBUG [Thread-2149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,172 DEBUG [Thread-2143 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:51621 2024-12-10T03:36:19,172 DEBUG [Thread-2143 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,173 DEBUG [Thread-2132 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:51621 2024-12-10T03:36:19,173 DEBUG [Thread-2132 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,173 DEBUG [Thread-2145 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:51621 2024-12-10T03:36:19,173 DEBUG [Thread-2145 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:19,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742364_1540 (size=31105) 2024-12-10T03:36:19,579 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c9013de7c2e244468d724d9dca6b73f1 2024-12-10T03:36:19,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/639d3e1828754158a50b829797714837 is 50, key is test_row_0/B:col10/1733801779144/Put/seqid=0 2024-12-10T03:36:19,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742365_1541 (size=12151) 2024-12-10T03:36:19,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T03:36:19,806 INFO [Thread-2140 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-10T03:36:19,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/639d3e1828754158a50b829797714837 2024-12-10T03:36:20,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb is 50, key is test_row_0/C:col10/1733801779144/Put/seqid=0 2024-12-10T03:36:20,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742366_1542 (size=12151) 2024-12-10T03:36:20,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb 2024-12-10T03:36:20,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/c9013de7c2e244468d724d9dca6b73f1 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c9013de7c2e244468d724d9dca6b73f1 2024-12-10T03:36:20,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c9013de7c2e244468d724d9dca6b73f1, entries=150, sequenceid=250, filesize=30.4 K 2024-12-10T03:36:20,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/639d3e1828754158a50b829797714837 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/639d3e1828754158a50b829797714837 2024-12-10T03:36:20,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/639d3e1828754158a50b829797714837, entries=150, sequenceid=250, filesize=11.9 K 2024-12-10T03:36:20,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb 2024-12-10T03:36:20,439 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb, entries=150, sequenceid=250, filesize=11.9 K 2024-12-10T03:36:20,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=87.22 KB/89310 for 177cb6154ba45b2806835557c21cb1d3 in 1295ms, sequenceid=250, compaction requested=true 2024-12-10T03:36:20,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T03:36:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T03:36:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:20,440 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 177cb6154ba45b2806835557c21cb1d3:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T03:36:20,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:20,440 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:20,441 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:20,441 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:20,441 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/B is initiating minor compaction (all files) 2024-12-10T03:36:20,441 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/A is initiating minor compaction (all files) 2024-12-10T03:36:20,441 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/A in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:20,441 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/B in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:20,441 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/75fb33d513fd4d67a17cdcb5542c9e5a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e2ba030d80fe4d8f878461728a10c872, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/639d3e1828754158a50b829797714837] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=36.1 K 2024-12-10T03:36:20,441 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/4b9313a27db9419985a813d5a54e0ccb, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/50e802bebea047099d725a68c669a33e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c9013de7c2e244468d724d9dca6b73f1] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=91.6 K 2024-12-10T03:36:20,441 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=53.85 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:20,441 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
files: [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/4b9313a27db9419985a813d5a54e0ccb, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/50e802bebea047099d725a68c669a33e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c9013de7c2e244468d724d9dca6b73f1] 2024-12-10T03:36:20,442 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 75fb33d513fd4d67a17cdcb5542c9e5a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733801773890 2024-12-10T03:36:20,442 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b9313a27db9419985a813d5a54e0ccb, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733801773890 2024-12-10T03:36:20,442 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting e2ba030d80fe4d8f878461728a10c872, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733801775879 2024-12-10T03:36:20,442 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50e802bebea047099d725a68c669a33e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733801775879 2024-12-10T03:36:20,442 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting 639d3e1828754158a50b829797714837, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733801777005 2024-12-10T03:36:20,443 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9013de7c2e244468d724d9dca6b73f1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733801777005 2024-12-10T03:36:20,450 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:20,451 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#B#compaction#456 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:20,451 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/bdcc0a68683e45298309c7359da28443 is 50, key is test_row_0/B:col10/1733801779144/Put/seqid=0 2024-12-10T03:36:20,452 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412108c42e493a48f462abe2e28d468745f41_177cb6154ba45b2806835557c21cb1d3 store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:20,455 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412108c42e493a48f462abe2e28d468745f41_177cb6154ba45b2806835557c21cb1d3, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:20,455 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108c42e493a48f462abe2e28d468745f41_177cb6154ba45b2806835557c21cb1d3 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:20,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742367_1543 (size=12731) 2024-12-10T03:36:20,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742368_1544 (size=4469) 2024-12-10T03:36:20,862 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#A#compaction#457 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:20,863 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/108273617e2744b29c24fc16d48525db is 175, key is test_row_0/A:col10/1733801779144/Put/seqid=0 2024-12-10T03:36:20,870 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/bdcc0a68683e45298309c7359da28443 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/bdcc0a68683e45298309c7359da28443 2024-12-10T03:36:20,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742369_1545 (size=31685) 2024-12-10T03:36:20,874 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/B of 177cb6154ba45b2806835557c21cb1d3 into bdcc0a68683e45298309c7359da28443(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:36:20,875 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:20,875 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/B, priority=13, startTime=1733801780440; duration=0sec 2024-12-10T03:36:20,875 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T03:36:20,875 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:B 2024-12-10T03:36:20,875 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T03:36:20,876 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T03:36:20,876 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1540): 177cb6154ba45b2806835557c21cb1d3/C is initiating minor compaction (all files) 2024-12-10T03:36:20,876 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 177cb6154ba45b2806835557c21cb1d3/C in TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:20,876 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/a67465f001894b12b3e8bfb87d91c91d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/e6c934a0a9aa484793a412698cd67e9b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb] into tmpdir=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp, totalSize=36.1 K 2024-12-10T03:36:20,876 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting a67465f001894b12b3e8bfb87d91c91d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733801773890 2024-12-10T03:36:20,877 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting e6c934a0a9aa484793a412698cd67e9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733801775879 2024-12-10T03:36:20,877 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] compactions.Compactor(224): Compacting f9f8cbd54c3f43f2bebe3cba6a65cbeb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733801777005 2024-12-10T03:36:20,883 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 177cb6154ba45b2806835557c21cb1d3#C#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 53.85 MB/second 2024-12-10T03:36:20,884 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/f548c8c2fb3648e891b536118fa7b3e0 is 50, key is test_row_0/C:col10/1733801779144/Put/seqid=0 2024-12-10T03:36:20,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742370_1546 (size=12731) 2024-12-10T03:36:21,282 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/108273617e2744b29c24fc16d48525db as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/108273617e2744b29c24fc16d48525db 2024-12-10T03:36:21,286 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/A of 177cb6154ba45b2806835557c21cb1d3 into 108273617e2744b29c24fc16d48525db(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T03:36:21,286 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:21,286 INFO [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/A, priority=13, startTime=1733801780440; duration=0sec 2024-12-10T03:36:21,286 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:21,286 DEBUG [RS:0;50b9ef1c5472:37553-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:A 2024-12-10T03:36:21,291 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/f548c8c2fb3648e891b536118fa7b3e0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f548c8c2fb3648e891b536118fa7b3e0 2024-12-10T03:36:21,294 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 177cb6154ba45b2806835557c21cb1d3/C of 177cb6154ba45b2806835557c21cb1d3 into f548c8c2fb3648e891b536118fa7b3e0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T03:36:21,294 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:21,294 INFO [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3., storeName=177cb6154ba45b2806835557c21cb1d3/C, priority=13, startTime=1733801780440; duration=0sec 2024-12-10T03:36:21,294 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T03:36:21,294 DEBUG [RS:0;50b9ef1c5472:37553-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 177cb6154ba45b2806835557c21cb1d3:C 2024-12-10T03:36:22,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37553 {}] regionserver.HRegion(8581): Flush requested on 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:22,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-10T03:36:22,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:22,027 DEBUG [Thread-2134 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:51621 2024-12-10T03:36:22,027 DEBUG [Thread-2134 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:22,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:22,027 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:22,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:22,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:22,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:22,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ce88d213d2ae4dfa8a82d58693008c81_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_0/A:col10/1733801779162/Put/seqid=0 2024-12-10T03:36:22,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742371_1547 (size=12454) 2024-12-10T03:36:22,172 DEBUG [Thread-2130 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:51621 2024-12-10T03:36:22,173 DEBUG [Thread-2130 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-10T03:36:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-10T03:36:22,173 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9302 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9233 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9097 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9337 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 9203 2024-12-10T03:36:22,174 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T03:36:22,174 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T03:36:22,174 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:51621 2024-12-10T03:36:22,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:22,175 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T03:36:22,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T03:36:22,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T03:36:22,178 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T03:36:22,179 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801782179"}]},"ts":"1733801782179"} 2024-12-10T03:36:22,180 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T03:36:22,197 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T03:36:22,197 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T03:36:22,198 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, UNASSIGN}] 2024-12-10T03:36:22,198 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=165, ppid=164, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, UNASSIGN 2024-12-10T03:36:22,199 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=165 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=CLOSING, regionLocation=50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:22,199 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T03:36:22,199 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; CloseRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862}] 2024-12-10T03:36:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T03:36:22,351 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:22,351 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(124): Close 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:22,351 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T03:36:22,351 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1681): Closing 177cb6154ba45b2806835557c21cb1d3, disabling compactions & flushes 2024-12-10T03:36:22,351 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
2024-12-10T03:36:22,443 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:22,451 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ce88d213d2ae4dfa8a82d58693008c81_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ce88d213d2ae4dfa8a82d58693008c81_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:22,453 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/efb30cae2c864918904d9f3b9618a340, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:22,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/efb30cae2c864918904d9f3b9618a340 is 175, key is test_row_0/A:col10/1733801779162/Put/seqid=0 2024-12-10T03:36:22,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742372_1548 (size=31255) 2024-12-10T03:36:22,458 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=270, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/efb30cae2c864918904d9f3b9618a340 2024-12-10T03:36:22,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/197570a4dbb241769347ca45600ff386 is 50, key is test_row_0/B:col10/1733801779162/Put/seqid=0 2024-12-10T03:36:22,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742373_1549 (size=12301) 2024-12-10T03:36:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T03:36:22,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T03:36:22,871 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/197570a4dbb241769347ca45600ff386 2024-12-10T03:36:22,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/8f562083659642d5a593a875aed2b050 is 50, key is test_row_0/C:col10/1733801779162/Put/seqid=0 2024-12-10T03:36:22,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742374_1550 (size=12301) 2024-12-10T03:36:23,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T03:36:23,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/8f562083659642d5a593a875aed2b050 2024-12-10T03:36:23,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/efb30cae2c864918904d9f3b9618a340 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/efb30cae2c864918904d9f3b9618a340 2024-12-10T03:36:23,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/efb30cae2c864918904d9f3b9618a340, entries=150, sequenceid=270, filesize=30.5 K 2024-12-10T03:36:23,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/197570a4dbb241769347ca45600ff386 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/197570a4dbb241769347ca45600ff386 2024-12-10T03:36:23,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/197570a4dbb241769347ca45600ff386, entries=150, sequenceid=270, filesize=12.0 K 2024-12-10T03:36:23,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/8f562083659642d5a593a875aed2b050 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/8f562083659642d5a593a875aed2b050 2024-12-10T03:36:23,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/8f562083659642d5a593a875aed2b050, entries=150, sequenceid=270, filesize=12.0 K 2024-12-10T03:36:23,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=6.71 KB/6870 for 
177cb6154ba45b2806835557c21cb1d3 in 1287ms, sequenceid=270, compaction requested=false 2024-12-10T03:36:23,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:23,313 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. after waiting 0 ms 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 2024-12-10T03:36:23,314 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(2837): Flushing 177cb6154ba45b2806835557c21cb1d3 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=A 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=B 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 177cb6154ba45b2806835557c21cb1d3, store=C 2024-12-10T03:36:23,314 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T03:36:23,319 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121016679b50b9f94476b565e856f53f280e_177cb6154ba45b2806835557c21cb1d3 is 50, key is test_row_1/A:col10/1733801782170/Put/seqid=0 2024-12-10T03:36:23,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742375_1551 (size=7374) 2024-12-10T03:36:23,722 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T03:36:23,749 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121016679b50b9f94476b565e856f53f280e_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121016679b50b9f94476b565e856f53f280e_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:23,750 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/e68a9b79eb2b49f0af60a4498f255a7e, store: [table=TestAcidGuarantees family=A region=177cb6154ba45b2806835557c21cb1d3] 2024-12-10T03:36:23,751 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/e68a9b79eb2b49f0af60a4498f255a7e is 175, key is test_row_1/A:col10/1733801782170/Put/seqid=0 2024-12-10T03:36:23,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742376_1552 (size=13865) 2024-12-10T03:36:24,159 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/e68a9b79eb2b49f0af60a4498f255a7e 2024-12-10T03:36:24,171 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/b2f4bf35424d4e6cbc94edacb11fe1bd is 50, key is test_row_1/B:col10/1733801782170/Put/seqid=0 2024-12-10T03:36:24,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742377_1553 (size=7415) 2024-12-10T03:36:24,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T03:36:24,577 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/b2f4bf35424d4e6cbc94edacb11fe1bd 2024-12-10T03:36:24,589 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ea7ea3dbd42a4cf2ad8b29267ee059c5 is 50, key is test_row_1/C:col10/1733801782170/Put/seqid=0 2024-12-10T03:36:24,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742378_1554 (size=7415) 2024-12-10T03:36:24,994 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ea7ea3dbd42a4cf2ad8b29267ee059c5 2024-12-10T03:36:25,003 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/A/e68a9b79eb2b49f0af60a4498f255a7e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/e68a9b79eb2b49f0af60a4498f255a7e 2024-12-10T03:36:25,007 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/e68a9b79eb2b49f0af60a4498f255a7e, entries=50, sequenceid=274, filesize=13.5 K 2024-12-10T03:36:25,008 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/B/b2f4bf35424d4e6cbc94edacb11fe1bd as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/b2f4bf35424d4e6cbc94edacb11fe1bd 2024-12-10T03:36:25,012 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/b2f4bf35424d4e6cbc94edacb11fe1bd, entries=50, sequenceid=274, filesize=7.2 K 2024-12-10T03:36:25,013 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/.tmp/C/ea7ea3dbd42a4cf2ad8b29267ee059c5 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ea7ea3dbd42a4cf2ad8b29267ee059c5 2024-12-10T03:36:25,017 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ea7ea3dbd42a4cf2ad8b29267ee059c5, entries=50, sequenceid=274, filesize=7.2 K 2024-12-10T03:36:25,018 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for 177cb6154ba45b2806835557c21cb1d3 in 1704ms, sequenceid=274, compaction requested=true 2024-12-10T03:36:25,019 DEBUG [StoreCloser-TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8a07313a0c97400f9b6c6e01ab26f2e3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8edce30291014bbe83bfc439b618fda5, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c557a57b9b284678a0c91bf3cbe09605, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/f7acc157b40d4f118375ff1add3a8566, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/a456b0c864ba48e4a865292956cf695c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bcb4ec106ffa4a79a1d2e2e23023ef20, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/184cd040e95c46179f7456d80798b5d8, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bfa4a18a8083493ca9435ad9d4c556be, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/0ecd2c1dbb3a42ffb76a3c4310633207, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/eb704ff2b63d4effaff7ab39b268a76b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/607cf32993c545b5860f319473ae2e06, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/11253e94e7ca44b3b30de746c9691875, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/99cda2e3a0304049806850e8f832bbc3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/316da7b3b1dd4f5d8414a7bc5c6d35d2, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/4b9313a27db9419985a813d5a54e0ccb, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/50e802bebea047099d725a68c669a33e, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c9013de7c2e244468d724d9dca6b73f1] to archive 2024-12-10T03:36:25,020 DEBUG [StoreCloser-TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:36:25,022 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8a07313a0c97400f9b6c6e01ab26f2e3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8a07313a0c97400f9b6c6e01ab26f2e3 2024-12-10T03:36:25,022 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c557a57b9b284678a0c91bf3cbe09605 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c557a57b9b284678a0c91bf3cbe09605 2024-12-10T03:36:25,022 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/f7acc157b40d4f118375ff1add3a8566 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/f7acc157b40d4f118375ff1add3a8566 2024-12-10T03:36:25,023 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8edce30291014bbe83bfc439b618fda5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/8edce30291014bbe83bfc439b618fda5 2024-12-10T03:36:25,023 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bcb4ec106ffa4a79a1d2e2e23023ef20 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bcb4ec106ffa4a79a1d2e2e23023ef20 2024-12-10T03:36:25,023 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/a456b0c864ba48e4a865292956cf695c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/a456b0c864ba48e4a865292956cf695c 2024-12-10T03:36:25,023 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/184cd040e95c46179f7456d80798b5d8 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/184cd040e95c46179f7456d80798b5d8 2024-12-10T03:36:25,024 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bfa4a18a8083493ca9435ad9d4c556be to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/bfa4a18a8083493ca9435ad9d4c556be 2024-12-10T03:36:25,024 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/eb704ff2b63d4effaff7ab39b268a76b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/eb704ff2b63d4effaff7ab39b268a76b 2024-12-10T03:36:25,025 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/607cf32993c545b5860f319473ae2e06 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/607cf32993c545b5860f319473ae2e06 2024-12-10T03:36:25,025 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/0ecd2c1dbb3a42ffb76a3c4310633207 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/0ecd2c1dbb3a42ffb76a3c4310633207 2024-12-10T03:36:25,025 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/11253e94e7ca44b3b30de746c9691875 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/11253e94e7ca44b3b30de746c9691875 2024-12-10T03:36:25,025 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/316da7b3b1dd4f5d8414a7bc5c6d35d2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/316da7b3b1dd4f5d8414a7bc5c6d35d2 2024-12-10T03:36:25,025 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/4b9313a27db9419985a813d5a54e0ccb to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/4b9313a27db9419985a813d5a54e0ccb 2024-12-10T03:36:25,025 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/99cda2e3a0304049806850e8f832bbc3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/99cda2e3a0304049806850e8f832bbc3 2024-12-10T03:36:25,026 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/50e802bebea047099d725a68c669a33e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/50e802bebea047099d725a68c669a33e 2024-12-10T03:36:25,026 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c9013de7c2e244468d724d9dca6b73f1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/c9013de7c2e244468d724d9dca6b73f1 2024-12-10T03:36:25,027 DEBUG [StoreCloser-TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/aaed304551854108bc98c77e32d6f726, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/21735d3c0bac43508c7b761dda1d1ca4, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/5e3f90414cea4008afb8c0da826c01eb, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/96376090310947fe9f10bfdac16b1958, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2671cc2250014defb97c158f3c9bbe7c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/53eec2f4d55945a2a497c5c55f9d40c0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/8f0373b4148b4eae861d64a47616f08a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2afbe257c6ce4a168956e0135a445db2, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e1995715dcf641ada3dc05de820d52b0, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/c283b59db7d94b799adb99b3c2e606c3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/64a8755427634d92b95684d7b5a33a45, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/732f1440f3764e59b5aee86f2dfc5670, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/d682667becec4758b331355f6e410303, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/75fb33d513fd4d67a17cdcb5542c9e5a, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/de5b1f323b6c443dbed7da498f4387f1, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e2ba030d80fe4d8f878461728a10c872, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/639d3e1828754158a50b829797714837] to archive 2024-12-10T03:36:25,027 DEBUG [StoreCloser-TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T03:36:25,029 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/aaed304551854108bc98c77e32d6f726 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/aaed304551854108bc98c77e32d6f726 2024-12-10T03:36:25,029 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/21735d3c0bac43508c7b761dda1d1ca4 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/21735d3c0bac43508c7b761dda1d1ca4 2024-12-10T03:36:25,029 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/96376090310947fe9f10bfdac16b1958 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/96376090310947fe9f10bfdac16b1958 2024-12-10T03:36:25,029 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2afbe257c6ce4a168956e0135a445db2 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2afbe257c6ce4a168956e0135a445db2 2024-12-10T03:36:25,030 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/5e3f90414cea4008afb8c0da826c01eb to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/5e3f90414cea4008afb8c0da826c01eb 2024-12-10T03:36:25,030 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/8f0373b4148b4eae861d64a47616f08a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/8f0373b4148b4eae861d64a47616f08a 2024-12-10T03:36:25,030 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2671cc2250014defb97c158f3c9bbe7c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/2671cc2250014defb97c158f3c9bbe7c 2024-12-10T03:36:25,030 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/53eec2f4d55945a2a497c5c55f9d40c0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/53eec2f4d55945a2a497c5c55f9d40c0 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e1995715dcf641ada3dc05de820d52b0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e1995715dcf641ada3dc05de820d52b0 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/c283b59db7d94b799adb99b3c2e606c3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/c283b59db7d94b799adb99b3c2e606c3 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/de5b1f323b6c443dbed7da498f4387f1 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/de5b1f323b6c443dbed7da498f4387f1 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/64a8755427634d92b95684d7b5a33a45 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/64a8755427634d92b95684d7b5a33a45 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/75fb33d513fd4d67a17cdcb5542c9e5a to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/75fb33d513fd4d67a17cdcb5542c9e5a 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e2ba030d80fe4d8f878461728a10c872 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/e2ba030d80fe4d8f878461728a10c872 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/d682667becec4758b331355f6e410303 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/d682667becec4758b331355f6e410303 2024-12-10T03:36:25,031 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/732f1440f3764e59b5aee86f2dfc5670 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/732f1440f3764e59b5aee86f2dfc5670 2024-12-10T03:36:25,032 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/639d3e1828754158a50b829797714837 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/639d3e1828754158a50b829797714837 2024-12-10T03:36:25,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/879a158e96e1447aa204dfe1f4a366fe, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9ca35b3596ba4c1e99e9cc45a2a25ffe, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b220544c2bb7499bbc6c9c3c5a3659ac, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9c7fa86718094911bd394509d601cefe, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d34a1217c262468c8922e34ea8db4c65, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c8b3f44878964112b4f4abcd9bc9a6ee, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/10af33a326e64269b36a33200c9c3030, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/554db4cc8c8047e6b048c49f356b6404, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ada165946ead4507a0fc00162926d574, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ee9f1406e70241eca292b191e3b87a8c, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c95c4f1fbdf4428eb03f7b3bc9106cf7, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d33285621d8c4793b83bd3ff8bfb3556, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/25d15f492af4497c99d7bef275fcebd3, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/a67465f001894b12b3e8bfb87d91c91d, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b62b35a170384e56b2a5c60144dd66d7, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/e6c934a0a9aa484793a412698cd67e9b, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb] to archive 2024-12-10T03:36:25,033 DEBUG [StoreCloser-TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9ca35b3596ba4c1e99e9cc45a2a25ffe to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9ca35b3596ba4c1e99e9cc45a2a25ffe 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/879a158e96e1447aa204dfe1f4a366fe to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/879a158e96e1447aa204dfe1f4a366fe 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9c7fa86718094911bd394509d601cefe to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/9c7fa86718094911bd394509d601cefe 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b220544c2bb7499bbc6c9c3c5a3659ac to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b220544c2bb7499bbc6c9c3c5a3659ac 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/10af33a326e64269b36a33200c9c3030 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/10af33a326e64269b36a33200c9c3030 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d34a1217c262468c8922e34ea8db4c65 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d34a1217c262468c8922e34ea8db4c65 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/554db4cc8c8047e6b048c49f356b6404 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/554db4cc8c8047e6b048c49f356b6404 2024-12-10T03:36:25,035 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c8b3f44878964112b4f4abcd9bc9a6ee to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c8b3f44878964112b4f4abcd9bc9a6ee 2024-12-10T03:36:25,036 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c95c4f1fbdf4428eb03f7b3bc9106cf7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/c95c4f1fbdf4428eb03f7b3bc9106cf7 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ada165946ead4507a0fc00162926d574 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ada165946ead4507a0fc00162926d574 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ee9f1406e70241eca292b191e3b87a8c to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ee9f1406e70241eca292b191e3b87a8c 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d33285621d8c4793b83bd3ff8bfb3556 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/d33285621d8c4793b83bd3ff8bfb3556 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b62b35a170384e56b2a5c60144dd66d7 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/b62b35a170384e56b2a5c60144dd66d7 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/25d15f492af4497c99d7bef275fcebd3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/25d15f492af4497c99d7bef275fcebd3 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/e6c934a0a9aa484793a412698cd67e9b to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/e6c934a0a9aa484793a412698cd67e9b 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/a67465f001894b12b3e8bfb87d91c91d to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/a67465f001894b12b3e8bfb87d91c91d 2024-12-10T03:36:25,037 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f9f8cbd54c3f43f2bebe3cba6a65cbeb 2024-12-10T03:36:25,040 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/recovered.edits/277.seqid, newMaxSeqId=277, maxSeqId=4 2024-12-10T03:36:25,041 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3. 
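The HFileArchiver-* entries above move each compacted store file from the region's data directory to the mirrored path under archive/ before the region is closed. A minimal sketch of that path mapping using only the plain Hadoop FileSystem API follows; the class and method names are illustrative, not the HBase HFileArchiver implementation.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Moves one store file from <root>/data/<ns>/<table>/<region>/<family>/<file>
      // to the mirrored <root>/archive/data/... location, as the HFileArchiver-*
      // log entries above record. Illustrative only.
      static Path archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile,
          String ns, String table, String region, String family) throws IOException {
        Path archiveDir = new Path(rootDir,
            "archive/data/" + ns + "/" + table + "/" + region + "/" + family);
        fs.mkdirs(archiveDir);                       // mirror the family directory first
        Path target = new Path(archiveDir, storeFile.getName());
        if (!fs.rename(storeFile, target)) {         // on HDFS this is a metadata move, not a copy
          throw new IOException("Failed to archive " + storeFile + " to " + target);
        }
        return target;
      }
    }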
2024-12-10T03:36:25,041 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1635): Region close journal for 177cb6154ba45b2806835557c21cb1d3: 2024-12-10T03:36:25,042 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(170): Closed 177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:25,042 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=165 updating hbase:meta row=177cb6154ba45b2806835557c21cb1d3, regionState=CLOSED 2024-12-10T03:36:25,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-10T03:36:25,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; CloseRegionProcedure 177cb6154ba45b2806835557c21cb1d3, server=50b9ef1c5472,37553,1733801610862 in 2.8440 sec 2024-12-10T03:36:25,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-10T03:36:25,045 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=177cb6154ba45b2806835557c21cb1d3, UNASSIGN in 2.8460 sec 2024-12-10T03:36:25,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-10T03:36:25,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.8480 sec 2024-12-10T03:36:25,046 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733801785046"}]},"ts":"1733801785046"} 2024-12-10T03:36:25,047 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T03:36:25,088 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T03:36:25,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.9130 sec 2024-12-10T03:36:26,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T03:36:26,289 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-10T03:36:26,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T03:36:26,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:36:26,294 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=167, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:36:26,295 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=167, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:36:26,295 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T03:36:26,298 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,301 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C, FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/recovered.edits] 2024-12-10T03:36:26,306 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/108273617e2744b29c24fc16d48525db to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/108273617e2744b29c24fc16d48525db 2024-12-10T03:36:26,307 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/e68a9b79eb2b49f0af60a4498f255a7e to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/e68a9b79eb2b49f0af60a4498f255a7e 2024-12-10T03:36:26,307 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/efb30cae2c864918904d9f3b9618a340 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/A/efb30cae2c864918904d9f3b9618a340 2024-12-10T03:36:26,312 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/197570a4dbb241769347ca45600ff386 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/197570a4dbb241769347ca45600ff386 2024-12-10T03:36:26,312 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/b2f4bf35424d4e6cbc94edacb11fe1bd to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/b2f4bf35424d4e6cbc94edacb11fe1bd 
2024-12-10T03:36:26,312 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/bdcc0a68683e45298309c7359da28443 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/B/bdcc0a68683e45298309c7359da28443 2024-12-10T03:36:26,317 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/8f562083659642d5a593a875aed2b050 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/8f562083659642d5a593a875aed2b050 2024-12-10T03:36:26,317 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f548c8c2fb3648e891b536118fa7b3e0 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/f548c8c2fb3648e891b536118fa7b3e0 2024-12-10T03:36:26,317 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ea7ea3dbd42a4cf2ad8b29267ee059c5 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/C/ea7ea3dbd42a4cf2ad8b29267ee059c5 2024-12-10T03:36:26,322 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/recovered.edits/277.seqid to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3/recovered.edits/277.seqid 2024-12-10T03:36:26,322 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/default/TestAcidGuarantees/177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,322 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T03:36:26,323 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T03:36:26,324 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210254ff424b63f4861a62d5b237a5e9ac2_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210254ff424b63f4861a62d5b237a5e9ac2_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101049713dda494ff1983d1c5d4f91b256_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101049713dda494ff1983d1c5d4f91b256_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a6964d38e0fd4072a37a5b9630b78ae4_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a6964d38e0fd4072a37a5b9630b78ae4_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103bf2bac8681d4b028b2dc1aa7b0ceebc_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412103bf2bac8681d4b028b2dc1aa7b0ceebc_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121013323b65a2494825834200f1a6b3e324_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121013323b65a2494825834200f1a6b3e324_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121062f62e397a1b4a37bbda029af246da31_177cb6154ba45b2806835557c21cb1d3 to 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121062f62e397a1b4a37bbda029af246da31_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121016679b50b9f94476b565e856f53f280e_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121016679b50b9f94476b565e856f53f280e_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,328 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108617ef0e877f450290fa96d188d029f3_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108617ef0e877f450290fa96d188d029f3_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,329 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bb84b825399242a484113af8899f9f55_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bb84b825399242a484113af8899f9f55_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,329 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ce88d213d2ae4dfa8a82d58693008c81_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ce88d213d2ae4dfa8a82d58693008c81_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,329 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bdce5d9babf942d4b41e45612a92c82e_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bdce5d9babf942d4b41e45612a92c82e_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,330 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e495232d75d2411d93182a2fb5d933ee_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e495232d75d2411d93182a2fb5d933ee_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,330 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e112d116c4bd4bbd9cc5a1ad677e4560_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e112d116c4bd4bbd9cc5a1ad677e4560_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,330 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f2a514984f26435aa59ffc8d1427ed7e_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f2a514984f26435aa59ffc8d1427ed7e_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,330 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ea53061d404f462182df42abadee98bb_177cb6154ba45b2806835557c21cb1d3 to hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ea53061d404f462182df42abadee98bb_177cb6154ba45b2806835557c21cb1d3 2024-12-10T03:36:26,330 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T03:36:26,332 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=167, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:36:26,333 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T03:36:26,335 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T03:36:26,336 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=167, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:36:26,336 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-10T03:36:26,336 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733801786336"}]},"ts":"9223372036854775807"} 2024-12-10T03:36:26,337 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T03:36:26,337 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 177cb6154ba45b2806835557c21cb1d3, NAME => 'TestAcidGuarantees,,1733801755915.177cb6154ba45b2806835557c21cb1d3.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T03:36:26,337 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T03:36:26,337 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733801786337"}]},"ts":"9223372036854775807"} 2024-12-10T03:36:26,338 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T03:36:26,380 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=167, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T03:36:26,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 91 msec 2024-12-10T03:36:26,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42969 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T03:36:26,396 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-10T03:36:26,403 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=246 (was 244) - Thread LEAK? 
-, OpenFileDescriptor=453 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=295 (was 295), ProcessCount=11 (was 11), AvailableMemoryMB=3257 (was 3286) 2024-12-10T03:36:26,403 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-10T03:36:26,403 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T03:36:26,403 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:51621 2024-12-10T03:36:26,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:26,403 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T03:36:26,404 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=942130827, stopped=false 2024-12-10T03:36:26,404 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=50b9ef1c5472,42969,1733801610154 2024-12-10T03:36:26,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T03:36:26,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T03:36:26,413 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-10T03:36:26,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:36:26,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:36:26,414 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:26,414 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '50b9ef1c5472,37553,1733801610862' ***** 2024-12-10T03:36:26,414 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-10T03:36:26,414 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T03:36:26,414 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T03:36:26,414 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T03:36:26,414 INFO [RS:0;50b9ef1c5472:37553 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T03:36:26,414 INFO [RS:0;50b9ef1c5472:37553 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
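The DISABLE (procId 163) and DELETE (procId 167) operations logged above are issued from the test client, which then polls MasterRpcServices until each procedure completes. A minimal sketch of the equivalent client-side calls using the public HBase Admin API; it assumes an hbase-site.xml on the classpath and is not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            admin.disableTable(tn);  // blocks until the DisableTableProcedure finishes
            admin.deleteTable(tn);   // blocks until the DeleteTableProcedure finishes
          }
        }
      }
    }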
2024-12-10T03:36:26,414 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-10T03:36:26,414 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(3579): Received CLOSE for c325cc16a4923122e0562a36b998907c 2024-12-10T03:36:26,415 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1224): stopping server 50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:26,415 DEBUG [RS:0;50b9ef1c5472:37553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:26,415 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T03:36:26,415 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T03:36:26,415 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T03:36:26,415 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing c325cc16a4923122e0562a36b998907c, disabling compactions & flushes 2024-12-10T03:36:26,415 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. after waiting 0 ms 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 
2024-12-10T03:36:26,415 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-10T03:36:26,415 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing c325cc16a4923122e0562a36b998907c 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-10T03:36:26,415 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1603): Online Regions={c325cc16a4923122e0562a36b998907c=hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c., 1588230740=hbase:meta,,1.1588230740} 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T03:36:26,415 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T03:36:26,415 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T03:36:26,416 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-10T03:36:26,416 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, c325cc16a4923122e0562a36b998907c 2024-12-10T03:36:26,428 INFO [regionserver/50b9ef1c5472:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T03:36:26,429 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c/.tmp/info/d51d6672c1ac445a858db25a7ac3872f is 45, key is default/info:d/1733801616413/Put/seqid=0 2024-12-10T03:36:26,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742379_1555 (size=5037) 2024-12-10T03:36:26,432 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c/.tmp/info/d51d6672c1ac445a858db25a7ac3872f 2024-12-10T03:36:26,435 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c/.tmp/info/d51d6672c1ac445a858db25a7ac3872f as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c/info/d51d6672c1ac445a858db25a7ac3872f 2024-12-10T03:36:26,437 DEBUG 
[RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/info/d8bfa4579210460a842b90fc725bcf67 is 143, key is hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c./info:regioninfo/1733801616317/Put/seqid=0 2024-12-10T03:36:26,438 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c/info/d51d6672c1ac445a858db25a7ac3872f, entries=2, sequenceid=6, filesize=4.9 K 2024-12-10T03:36:26,439 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for c325cc16a4923122e0562a36b998907c in 24ms, sequenceid=6, compaction requested=false 2024-12-10T03:36:26,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742380_1556 (size=7725) 2024-12-10T03:36:26,446 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/namespace/c325cc16a4923122e0562a36b998907c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T03:36:26,446 INFO [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 2024-12-10T03:36:26,446 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for c325cc16a4923122e0562a36b998907c: 2024-12-10T03:36:26,447 DEBUG [RS_CLOSE_REGION-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733801615030.c325cc16a4923122e0562a36b998907c. 
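The flush of c325cc16a4923122e0562a36b998907c above follows the usual two-phase pattern: the new file is written under the region's .tmp directory and only then committed into the family directory. A generic sketch of that write-then-rename pattern with the plain FileSystem API; it stands in for, and is not, the DefaultStoreFlusher/HRegionFileSystem code.

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FlushCommitSketch {
      // Write the flushed file under <region>/.tmp/<family>/ first, then rename it
      // into <region>/<family>/ so readers never observe a partially written file.
      static Path writeThenCommit(FileSystem fs, Path regionDir, String family,
          String fileName, byte[] payload) throws IOException {
        Path tmpFile = new Path(new Path(regionDir, ".tmp/" + family), fileName);
        try (FSDataOutputStream out = fs.create(tmpFile)) {
          out.write(payload);                        // stand-in for the real HFile writer
        }
        Path committed = new Path(new Path(regionDir, family), fileName);
        if (!fs.rename(tmpFile, committed)) {        // commit = atomic rename into the store
          throw new IOException("Commit failed for " + tmpFile);
        }
        return committed;
      }
    }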
2024-12-10T03:36:26,616 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T03:36:26,624 INFO [regionserver/50b9ef1c5472:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T03:36:26,624 INFO [regionserver/50b9ef1c5472:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T03:36:26,817 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T03:36:26,845 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/info/d8bfa4579210460a842b90fc725bcf67 2024-12-10T03:36:26,869 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/rep_barrier/114d45a53da64c5b928d840a4525e2e0 is 102, key is TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d./rep_barrier:/1733801642285/DeleteFamily/seqid=0 2024-12-10T03:36:26,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742381_1557 (size=6025) 2024-12-10T03:36:27,017 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T03:36:27,217 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T03:36:27,274 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/rep_barrier/114d45a53da64c5b928d840a4525e2e0 2024-12-10T03:36:27,298 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/table/e3c62337b4a144cfa311c9a5baf104d8 is 96, key is TestAcidGuarantees,,1733801616717.c1d70420fe32ca56286cb5d739cf351d./table:/1733801642285/DeleteFamily/seqid=0 2024-12-10T03:36:27,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742382_1558 (size=5942) 2024-12-10T03:36:27,418 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-10T03:36:27,418 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-10T03:36:27,418 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T03:36:27,618 DEBUG [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T03:36:27,702 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), 
to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/table/e3c62337b4a144cfa311c9a5baf104d8 2024-12-10T03:36:27,712 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/info/d8bfa4579210460a842b90fc725bcf67 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/info/d8bfa4579210460a842b90fc725bcf67 2024-12-10T03:36:27,717 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/info/d8bfa4579210460a842b90fc725bcf67, entries=22, sequenceid=93, filesize=7.5 K 2024-12-10T03:36:27,718 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/rep_barrier/114d45a53da64c5b928d840a4525e2e0 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/rep_barrier/114d45a53da64c5b928d840a4525e2e0 2024-12-10T03:36:27,721 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/rep_barrier/114d45a53da64c5b928d840a4525e2e0, entries=6, sequenceid=93, filesize=5.9 K 2024-12-10T03:36:27,722 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/.tmp/table/e3c62337b4a144cfa311c9a5baf104d8 as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/table/e3c62337b4a144cfa311c9a5baf104d8 2024-12-10T03:36:27,725 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/table/e3c62337b4a144cfa311c9a5baf104d8, entries=9, sequenceid=93, filesize=5.8 K 2024-12-10T03:36:27,726 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1311ms, sequenceid=93, compaction requested=false 2024-12-10T03:36:27,730 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-10T03:36:27,730 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T03:36:27,731 INFO [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed 
hbase:meta,,1.1588230740 2024-12-10T03:36:27,731 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T03:36:27,731 DEBUG [RS_CLOSE_META-regionserver/50b9ef1c5472:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T03:36:27,818 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1250): stopping server 50b9ef1c5472,37553,1733801610862; all regions closed. 2024-12-10T03:36:27,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741834_1010 (size=26050) 2024-12-10T03:36:27,831 DEBUG [RS:0;50b9ef1c5472:37553 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/oldWALs 2024-12-10T03:36:27,831 INFO [RS:0;50b9ef1c5472:37553 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 50b9ef1c5472%2C37553%2C1733801610862.meta:.meta(num 1733801614763) 2024-12-10T03:36:27,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741832_1008 (size=12903691) 2024-12-10T03:36:27,836 DEBUG [RS:0;50b9ef1c5472:37553 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/oldWALs 2024-12-10T03:36:27,836 INFO [RS:0;50b9ef1c5472:37553 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 50b9ef1c5472%2C37553%2C1733801610862:(num 1733801613718) 2024-12-10T03:36:27,836 DEBUG [RS:0;50b9ef1c5472:37553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:27,836 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T03:36:27,836 INFO [RS:0;50b9ef1c5472:37553 {}] hbase.ChoreService(370): Chore service for: regionserver/50b9ef1c5472:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-10T03:36:27,837 INFO [regionserver/50b9ef1c5472:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-10T03:36:27,837 INFO [RS:0;50b9ef1c5472:37553 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37553 2024-12-10T03:36:27,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/50b9ef1c5472,37553,1733801610862 2024-12-10T03:36:27,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T03:36:27,888 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [50b9ef1c5472,37553,1733801610862] 2024-12-10T03:36:27,889 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 50b9ef1c5472,37553,1733801610862; numProcessing=1 2024-12-10T03:36:27,897 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/50b9ef1c5472,37553,1733801610862 already deleted, retry=false 2024-12-10T03:36:27,897 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 50b9ef1c5472,37553,1733801610862 expired; onlineServers=0 2024-12-10T03:36:27,897 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '50b9ef1c5472,42969,1733801610154' ***** 2024-12-10T03:36:27,897 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T03:36:27,898 DEBUG [M:0;50b9ef1c5472:42969 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61906447, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=50b9ef1c5472/172.17.0.2:0 2024-12-10T03:36:27,898 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HRegionServer(1224): stopping server 50b9ef1c5472,42969,1733801610154 2024-12-10T03:36:27,898 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HRegionServer(1250): stopping server 50b9ef1c5472,42969,1733801610154; all regions closed. 2024-12-10T03:36:27,898 DEBUG [M:0;50b9ef1c5472:42969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T03:36:27,898 DEBUG [M:0;50b9ef1c5472:42969 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T03:36:27,899 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-10T03:36:27,899 DEBUG [M:0;50b9ef1c5472:42969 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T03:36:27,899 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster-HFileCleaner.small.0-1733801613472 {}] cleaner.HFileCleaner(306): Exit Thread[master/50b9ef1c5472:0:becomeActiveMaster-HFileCleaner.small.0-1733801613472,5,FailOnTimeoutGroup] 2024-12-10T03:36:27,899 DEBUG [master/50b9ef1c5472:0:becomeActiveMaster-HFileCleaner.large.0-1733801613470 {}] cleaner.HFileCleaner(306): Exit Thread[master/50b9ef1c5472:0:becomeActiveMaster-HFileCleaner.large.0-1733801613470,5,FailOnTimeoutGroup] 2024-12-10T03:36:27,899 INFO [M:0;50b9ef1c5472:42969 {}] hbase.ChoreService(370): Chore service for: master/50b9ef1c5472:0 had [] on shutdown 2024-12-10T03:36:27,900 DEBUG [M:0;50b9ef1c5472:42969 {}] master.HMaster(1733): Stopping service threads 2024-12-10T03:36:27,900 INFO [M:0;50b9ef1c5472:42969 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T03:36:27,900 ERROR [M:0;50b9ef1c5472:42969 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:33939 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:33939,5,PEWorkerGroup] 2024-12-10T03:36:27,902 INFO [M:0;50b9ef1c5472:42969 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T03:36:27,902 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-10T03:36:27,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T03:36:27,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T03:36:27,909 DEBUG [M:0;50b9ef1c5472:42969 {}] zookeeper.ZKUtil(347): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T03:36:27,909 WARN [M:0;50b9ef1c5472:42969 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T03:36:27,909 INFO [M:0;50b9ef1c5472:42969 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-10T03:36:27,909 INFO [M:0;50b9ef1c5472:42969 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T03:36:27,909 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T03:36:27,909 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T03:36:27,909 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T03:36:27,909 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T03:36:27,909 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-10T03:36:27,909 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T03:36:27,909 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=721.72 KB heapSize=884.09 KB
2024-12-10T03:36:27,922 DEBUG [M:0;50b9ef1c5472:42969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd750268dd9e4107836792b771aa6bda is 82, key is hbase:meta,,1/info:regioninfo/1733801614887/Put/seqid=0
2024-12-10T03:36:27,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742383_1559 (size=5672)
2024-12-10T03:36:27,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T03:36:27,989 INFO [RS:0;50b9ef1c5472:37553 {}] regionserver.HRegionServer(1307): Exiting; stopping=50b9ef1c5472,37553,1733801610862; zookeeper connection closed.
2024-12-10T03:36:27,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37553-0x1000e00254c0001, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T03:36:27,989 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5da0034b {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5da0034b
2024-12-10T03:36:27,989 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-10T03:36:28,327 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=1992 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd750268dd9e4107836792b771aa6bda
2024-12-10T03:36:28,354 DEBUG [M:0;50b9ef1c5472:42969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/097e9adc78c54158934a1b3d3ad3348e is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x92/proc:d/1733801759036/Put/seqid=0
2024-12-10T03:36:28,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742384_1560 (size=42967)
2024-12-10T03:36:28,358 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=721.17 KB at sequenceid=1992 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/097e9adc78c54158934a1b3d3ad3348e
2024-12-10T03:36:28,361 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 097e9adc78c54158934a1b3d3ad3348e
2024-12-10T03:36:28,373 DEBUG [M:0;50b9ef1c5472:42969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd42089ef55c4a46958419d7e420e60c is 69, key is 50b9ef1c5472,37553,1733801610862/rs:state/1733801613495/Put/seqid=0
2024-12-10T03:36:28,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073742385_1561 (size=5156)
2024-12-10T03:36:28,777 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=1992 (bloomFilter=true), to=hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd42089ef55c4a46958419d7e420e60c
2024-12-10T03:36:28,785 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cd750268dd9e4107836792b771aa6bda as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd750268dd9e4107836792b771aa6bda
2024-12-10T03:36:28,790 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cd750268dd9e4107836792b771aa6bda, entries=8, sequenceid=1992, filesize=5.5 K
2024-12-10T03:36:28,791 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/097e9adc78c54158934a1b3d3ad3348e as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/097e9adc78c54158934a1b3d3ad3348e
2024-12-10T03:36:28,794 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 097e9adc78c54158934a1b3d3ad3348e
2024-12-10T03:36:28,794 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/097e9adc78c54158934a1b3d3ad3348e, entries=167, sequenceid=1992, filesize=42.0 K
2024-12-10T03:36:28,794 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dd42089ef55c4a46958419d7e420e60c as hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd42089ef55c4a46958419d7e420e60c
2024-12-10T03:36:28,797 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33939/user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dd42089ef55c4a46958419d7e420e60c, entries=1, sequenceid=1992, filesize=5.0 K
2024-12-10T03:36:28,797 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(3040): Finished flush of dataSize ~721.72 KB/739045, heapSize ~883.80 KB/905008, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 888ms, sequenceid=1992, compaction requested=false
2024-12-10T03:36:28,799 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T03:36:28,799 DEBUG [M:0;50b9ef1c5472:42969 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-10T03:36:28,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36539 is added to blk_1073741830_1006 (size=869716)
2024-12-10T03:36:28,800 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/337388aa-9c0a-1c4b-f221-74ab05a7a34a/MasterData/WALs/50b9ef1c5472,42969,1733801610154/50b9ef1c5472%2C42969%2C1733801610154.1733801612875 not finished, retry = 0
2024-12-10T03:36:28,902 INFO [M:0;50b9ef1c5472:42969 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-10T03:36:28,902 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-10T03:36:28,902 INFO [M:0;50b9ef1c5472:42969 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42969
2024-12-10T03:36:28,917 DEBUG [M:0;50b9ef1c5472:42969 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/50b9ef1c5472,42969,1733801610154 already deleted, retry=false
2024-12-10T03:36:29,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T03:36:29,026 INFO [M:0;50b9ef1c5472:42969 {}] regionserver.HRegionServer(1307): Exiting; stopping=50b9ef1c5472,42969,1733801610154; zookeeper connection closed.
2024-12-10T03:36:29,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42969-0x1000e00254c0000, quorum=127.0.0.1:51621, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T03:36:29,036 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T03:36:29,040 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T03:36:29,040 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T03:36:29,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T03:36:29,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/hadoop.log.dir/,STOPPED}
2024-12-10T03:36:29,042 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T03:36:29,042 WARN [BP-727406365-172.17.0.2-1733801607156 heartbeating to localhost/127.0.0.1:33939 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T03:36:29,042 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T03:36:29,042 WARN [BP-727406365-172.17.0.2-1733801607156 heartbeating to localhost/127.0.0.1:33939 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-727406365-172.17.0.2-1733801607156 (Datanode Uuid f948c425-565d-4207-99fb-2ad82d3071d6) service to localhost/127.0.0.1:33939
2024-12-10T03:36:29,045 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/dfs/data/data1/current/BP-727406365-172.17.0.2-1733801607156 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T03:36:29,045 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/cluster_07d484bc-7e40-ad0f-7957-8e86fe7fd7a9/dfs/data/data2/current/BP-727406365-172.17.0.2-1733801607156 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T03:36:29,045 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T03:36:29,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T03:36:29,051 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T03:36:29,051 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T03:36:29,052 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T03:36:29,052 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e4f7ea8f-e477-bd47-711d-d3ebda0579f7/hadoop.log.dir/,STOPPED}
2024-12-10T03:36:29,066 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-10T03:36:29,168 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
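
For reference, the closing "hbase.HBaseTestingUtility(1347): Minicluster is down" message is logged while a test tears down the embedded cluster (HBase master and regionserver, mini DFS, mini ZooKeeper) whose individual shutdown events appear above. The Java sketch below outlines that lifecycle under the assumption of the branch-2 HBaseTestingUtility API (startMiniCluster/shutdownMiniCluster); the class name, the single-regionserver count, and the empty test body are illustrative only, not taken from this log.

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    // Starts an in-process HDFS datanode, ZooKeeper quorum, HBase master and
    // regionserver -- the components whose shutdown is traced in the log above.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1); // one regionserver; illustrative choice

    try {
      // Test logic would go here, e.g. via util.getConnection() / util.getAdmin().
    } finally {
      // Stops HBase, then the mini DFS and mini ZK clusters; this teardown path
      // is what ends with the "Minicluster is down" line.
      util.shutdownMiniCluster();
    }
  }
}

In an actual test class this start/stop pair would normally sit in setup/teardown hooks (e.g. JUnit @BeforeClass/@AfterClass) rather than in main(), so every test method runs against the same embedded cluster.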